repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
PatrickOReilly/scikit-learn
|
examples/mixture/plot_bayesian_gaussian_mixture.py
|
2
|
4427
|
"""
======================================================
Bayesian Gaussian Mixture Concentration Prior Analysis
======================================================
Plot the resulting ellipsoids of a mixture of three Gaussians fitted with a
variational Bayesian Gaussian mixture for three different values of the prior
on the Dirichlet concentration.
For all models, the variational Bayesian Gaussian mixture adapts its number of
mixture components automatically. The parameter `dirichlet_concentration_prior`
has a direct link with the resulting number of components: specifying a high
value of `dirichlet_concentration_prior` more often leads to uniformly-sized
mixture components, while small values (under 0.1) lead to a few mixture
components capturing almost all the points, with most of the other components
centered on just a few of the remaining points.
"""
# Author: Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.mixture import BayesianGaussianMixture
print(__doc__)
def plot_ellipses(ax, weights, means, covars):
for n in range(means.shape[0]):
v, w = np.linalg.eigh(covars[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2 * np.sqrt(2) * np.sqrt(v)
ell = mpl.patches.Ellipse(means[n, :2], v[0], v[1], 180 + angle)
ell.set_clip_box(ax.bbox)
ell.set_alpha(weights[n])
ax.add_artist(ell)
def plot_results(ax1, ax2, estimator, dirichlet_concentration_prior, X, y, plot_title=False):
estimator.dirichlet_concentration_prior = dirichlet_concentration_prior
estimator.fit(X)
ax1.set_title("Bayesian Gaussian Mixture for "
r"$dc_0=%.1e$" % dirichlet_concentration_prior)
# ax1.axis('equal')
ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8)
ax1.set_xlim(-2., 2.)
ax1.set_ylim(-3., 3.)
ax1.set_xticks(())
ax1.set_yticks(())
plot_ellipses(ax1, estimator.weights_, estimator.means_,
estimator.covariances_)
ax2.get_xaxis().set_tick_params(direction='out')
ax2.yaxis.grid(True, alpha=0.7)
for k, w in enumerate(estimator.weights_):
ax2.bar(k - .45, w, width=0.9, color='royalblue', zorder=3)
ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.),
horizontalalignment='center')
ax2.set_xlim(-.6, 2 * n_components - .4)
ax2.set_ylim(0., 1.1)
ax2.tick_params(axis='y', which='both', left='off',
right='off', labelleft='off')
ax2.tick_params(axis='x', which='both', top='off')
if plot_title:
ax1.set_ylabel('Estimated Mixtures')
ax2.set_ylabel('Weight of each component')
# Parameters
random_state = 2
n_components, n_features = 3, 2
colors = np.array(['mediumseagreen', 'royalblue', 'r', 'gold',
'orchid', 'indigo', 'darkcyan', 'tomato'])
dirichlet_concentration_prior = np.logspace(-3, 3, 3)
covars = np.array([[[.7, .0], [.0, .1]],
[[.5, .0], [.0, .1]],
[[.5, .0], [.0, .1]]])
samples = np.array([200, 500, 200])
means = np.array([[.0, -.70],
[.0, .0],
[.0, .70]])
# Here we set mean_precision_prior to 0.8 to minimize the influence of the
# prior on the means for this dataset
estimator = BayesianGaussianMixture(n_components=2 * n_components,
init_params='random', max_iter=1500,
mean_precision_prior=.8, tol=1e-9,
random_state=random_state)
# Generate data
rng = np.random.RandomState(random_state)
X = np.vstack([
rng.multivariate_normal(means[j], covars[j], samples[j])
for j in range(n_components)])
y = np.concatenate([j * np.ones(samples[j], dtype=int)
for j in range(n_components)])
# Plot Results
plt.figure(figsize=(4.7 * 3, 8))
plt.subplots_adjust(bottom=.04, top=0.95, hspace=.05, wspace=.05,
left=.03, right=.97)
gs = gridspec.GridSpec(3, len(dirichlet_concentration_prior))
for k, dc in enumerate(dirichlet_concentration_prior):
plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]),
estimator, dc, X, y, plot_title=k == 0)
plt.show()
|
bsd-3-clause
|
ai-se/Transfer-Learning
|
src/planners/plan.py
|
1
|
3609
|
import os
import sys
# Update path
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
import pandas as pd
from pdb import set_trace
from tools.decision_tree import DecisionTree
from utils.misc_utils import flatten
from utils.experiment_utils import Changes
from utils.file_util import list2dataframe
from random import uniform, random as rand
class Plan:
    def __init__(self, trainDF, testDF, tree=None, config=False):
        self.trainDF = trainDF
        self.testDF = testDF
        self.tree = tree
        self.config = config
        self.change = []
def leaves(self, node):
"""
Returns all terminal nodes.
"""
L = []
if len(node.kids) > 1:
for l in node.kids:
L.extend(self.leaves(l))
return L
elif len(node.kids) == 1:
return [node.kids]
else:
return [node]
def find(self, testInst, t):
if len(t.kids) == 0:
return t
for kid in t.kids:
try:
if kid.val[0] <= testInst[kid.f].values[0] < kid.val[1]:
return self.find(testInst, kid)
elif kid.val[1] == testInst[kid.f].values[0] == \
self.trainDF.describe()[kid.f]['max']:
return self.find(testInst, kid)
except:
return self.find(testInst, kid)
return t
@staticmethod
def howfar(me, other):
# set_trace()
common = [a[0] for a in me.branch if a[0] in [o[0] for o in other.branch]]
return len(me.branch) - len(common)
def patchIt(self, testInst, config=False):
C = Changes() # Record changes
testInst = pd.DataFrame(testInst).transpose()
current = self.find(testInst, self.tree)
node = current
while node.lvl > -1:
node = node.up # Move to tree root
leaves = flatten([self.leaves(_k) for _k in node.kids])
try:
if self.config:
best = sorted([l for l in leaves if l.score <= 0.9 * current.score],
key=lambda F: self.howfar(current, F))[0]
else:
best = \
sorted(
[l for l in leaves if l.score == 0 ],
key=lambda F: self.howfar(current, F))[0]
# set_trace()
except:
return testInst.values.tolist()[0]
def new(old, range):
rad = abs(min(range[1] - old, old - range[1]))
return abs(range[0]), abs(range[1])
# return uniform(range[0], range[1])
for ii in best.branch:
before = testInst[ii[0]]
if not ii in current.branch:
then = testInst[ii[0]].values[0]
now = ii[1] if self.config else new(testInst[ii[0]].values[0],
ii[1])
# print(now)
testInst[ii[0]] = str(now)
# C.save(name=ii[0], old=then, new=now)
testInst[testInst.columns[-1]] = 1
# self.change.append(C.log)
return testInst.values.tolist()[0]
def transfrom(self):
newRows = []
for n in range(self.testDF.shape[0]):
if self.testDF.iloc[n][-1] > 0 or self.testDF.iloc[n][-1] == True:
newRows.append(self.patchIt(self.testDF.iloc[n]))
else:
newRows.append(self.testDF.iloc[n].values.tolist())
return pd.DataFrame(newRows, columns=self.testDF.columns)
|
unlicense
|
murali-munna/scikit-learn
|
sklearn/semi_supervised/tests/test_label_propagation.py
|
307
|
1974
|
""" test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
|
bsd-3-clause
|
stefanseibert/DataMining
|
experiment01/report/Skripts/energyPrediction.py
|
2
|
2528
|
import pandas
from sklearn import svm, cross_validation, feature_selection
from numpy import *
import matplotlib.pyplot as plt
NUM_CLUSTER = 4
ENERGYFORMS = ['Oil', 'Gas', 'Coal', 'Nuclear', 'Hydro']
TARGET = ['CO2Emm']
NUM_ENERGYFORMS = len(ENERGYFORMS)
def crossValidateRegression(X, y, C, epsilon):
svr = svm.SVR(kernel='linear', C=C, epsilon=epsilon)
crossValidator = cross_validation.KFold(NUM_COUNTRYS, n_folds = 10)
cross_validation.cross_val_score(svr, X, y, cv=crossValidator, scoring='mean_squared_error')
svr.fit(X, y)
predictedTargetInfo = svr.predict(X)
return predictedTargetInfo
def getMae(actual, predicted):
return sum(abs(actual - predicted))/len(actual)
def plot(actual, predicted, C, epsilon, mae, colors=['g', 'r']):
plt.figure()
plt.plot(actual, color=colors[0], label='actual')
plt.plot(predicted, color=colors[1], label='predicted')
plt.legend()
plt.annotate("C = "+str(C), xy=(2.5,7500))
plt.annotate(u'\u03B5'+" = "+unicode(epsilon), xy=(2.5,7100))
plt.annotate("MAE = " + str(mae), xy=(2.5,6700))
plt.show()
def getInfo(actual, predicted, indices):
frame = pandas.DataFrame(data=actual, index=indices, columns=['actual'])
frame['predicted'] = pandas.Series(data=predicted, index=indices)
frame['difference'] = actual - predicted
return frame
energyInfo = pandas.read_csv('../resources/EnergyMixGeo.csv')
reducedEnergyInfo = energyInfo[ENERGYFORMS]
targetInfo = energyInfo[TARGET].values[:,0]
NUM_COUNTRYS = len(reducedEnergyInfo.values)
NUM_ITERATIONS = 4
C=[float(10)**-x for x in range(NUM_ITERATIONS)]
epsilon=[float(10)**-x for x in range(1,NUM_ITERATIONS+1)]
for i in range(NUM_ITERATIONS):
predictedTargetInfo = crossValidateRegression(reducedEnergyInfo, targetInfo, C[i], epsilon[i])
mae = getMae(targetInfo, predictedTargetInfo)
print "\nMean Absolute Error: " + str(mae) + ", C = " + str(C[i]) + ", epsilon = " + str(epsilon[i])
#plot(targetInfo, predictedTargetInfo, C, epsilon, mae)
#print getInfo(targetInfo, predictedTargetInfo, energyInfo.Country.values)
#Mean Absolute Error: 0.119938469138, C = 1, epsilon = 0.01
#Mean Absolute Error: 0.119995514827, C = 1, epsilon = 0.001
#Mean Absolute Error: 0.119986240023, C = 1, epsilon = 0.0001
#Mean Absolute Error: 0.124915412379, C = 1.0, epsilon = 0.1
#Mean Absolute Error: 0.119776387503, C = 0.1, epsilon = 0.01
#Mean Absolute Error: 0.11925998931, C = 0.01, epsilon = 0.001
#Mean Absolute Error: 64.9026381798, C = 0.001, epsilon = 0.0001
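# Hedged sanity check for the getMae helper above (the vectors are made up for
# illustration and are not taken from the recorded results):
_example_actual = array([1.0, 2.0, 3.0])
_example_predicted = array([1.0, 2.0, 4.0])
assert abs(getMae(_example_actual, _example_predicted) - 1.0 / 3.0) < 1e-12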
|
mit
|
Gljivius/SemSegmentacijaUtakmice
|
SemSegmentacija/eval_helper.py
|
2
|
4934
|
import os
import numpy as np
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import skimage as ski
import skimage.io
def draw_output(y, class_colors, save_path):
width = y.shape[1]
height = y.shape[0]
y_rgb = np.empty((height, width, 3), dtype=np.uint8)
for cid in range(len(class_colors)):
cpos = np.repeat((y == cid).reshape((height, width, 1)), 3, axis=2)
cnum = cpos.sum() // 3
y_rgb[cpos] = np.array(class_colors[cid][:3] * cnum, dtype=np.uint8)
#pixels = y_rgb[[np.repeat(np.equal(y, cid).reshape((height, width, 1)), 3, axis=2)]
#if pixels.size > 0:
# #pixels.reshape((-1, 3))[:,:] = class_colors[cid][:3]
# #pixels.resize((int(pixels.size/3), 3))
# print(np.array(class_colors[cid][:3] * (pixels.size // 3), dtype=np.uint8))
# pixels = np.array(class_colors[cid][:3] * (pixels.size // 3), dtype=np.uint8)
#y_rgb[np.repeat(np.equal(y, cid).reshape((height, width, 1)), 3, axis=2)].reshape((-1, 3)) = \
# class_colors[cid][:3]
ski.io.imsave(save_path, y_rgb)
def collect_confusion_matrix(y, yt, conf_mat):
for i in range(y.size):
l = y[i]
lt = yt[i]
if lt >= 0:
conf_mat[l,lt] += 1
def compute_errors(conf_mat, name, class_info, verbose=True):
num_correct = conf_mat.trace()
num_classes = conf_mat.shape[0]
total_size = conf_mat.sum()
avg_pixel_acc = num_correct / total_size * 100.0
TPFN = conf_mat.sum(0)
TPFP = conf_mat.sum(1)
FN = TPFN - conf_mat.diagonal()
FP = TPFP - conf_mat.diagonal()
class_iou = np.zeros(num_classes)
class_recall = np.zeros(num_classes)
class_precision = np.zeros(num_classes)
if verbose:
print(name + ' errors:')
for i in range(num_classes):
TP = conf_mat[i,i]
class_iou[i] = (TP / (TP + FP[i] + FN[i])) * 100.0
if TPFN[i] > 0:
class_recall[i] = (TP / TPFN[i]) * 100.0
else:
class_recall[i] = 0
if TPFP[i] > 0:
class_precision[i] = (TP / TPFP[i]) * 100.0
else:
class_precision[i] = 0
class_name = class_info[i][3]
if verbose:
print('\t%s IoU accuracy = %.2f %%' % (class_name, class_iou[i]))
avg_class_iou = class_iou.mean()
avg_class_recall = class_recall.mean()
avg_class_precision = class_precision.mean()
if verbose:
print(name + ' pixel accuracy = %.2f %%' % avg_pixel_acc)
print(name + ' IoU mean class accuracy - TP / (TP+FN+FP) = %.2f %%' % avg_class_iou)
print(name + ' mean class recall - TP / (TP+FN) = %.2f %%' % avg_class_recall)
print(name + ' mean class precision - TP / (TP+FP) = %.2f %%' % avg_class_precision)
return avg_pixel_acc, avg_class_iou, avg_class_recall, avg_class_precision, total_size
def plot_training_progress(save_dir, loss, iou, pixel_acc):
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16,8))
linewidth = 2
legend_size = 6
title_size = 10
train_color = 'm'
val_color = 'c'
x_data = np.linspace(1, len(loss[0]), len(loss[0]))
ax1.set_title('cross entropy loss', fontsize=title_size)
ax1.plot(x_data, loss[0], marker='o', color=train_color, linewidth=linewidth, linestyle='-', \
label='train')
ax1.plot(x_data, loss[1], marker='o', color=val_color, linewidth=linewidth, linestyle='-',
label='validation')
ax1.legend(loc='upper right', fontsize=legend_size)
ax2.set_title('IoU accuracy')
ax2.plot(x_data, iou[0], marker='o', color=train_color, linewidth=linewidth, linestyle='-',
label='train')
ax2.plot(x_data, iou[1], marker='o', color=val_color, linewidth=linewidth, linestyle='-',
label='validation')
ax2.legend(loc='upper left', fontsize=legend_size)
ax3.set_title('pixel accuracy')
ax3.plot(x_data, pixel_acc[0], marker='o', color=train_color, linewidth=linewidth, linestyle='-',
label='train')
ax3.plot(x_data, pixel_acc[1], marker='o', color=val_color, linewidth=linewidth, linestyle='-',
label='validation')
ax3.legend(loc='upper left', fontsize=legend_size)
#ax4.set_title('')
#plt.figure(fig.number)
#plt.clf()
#plt.plot(x_data, data, 'b-')
#plt.axis([0, 6, 0, 20])
#plt.show()
#plt.draw()
#plt.show(block=False)
#plt.savefig('training_plot.pdf', bbox_inches='tight')
save_path = os.path.join(save_dir, 'training_plot.pdf')
print('Plotting in: ', save_path)
plt.savefig(save_path)
def map_cityscapes_to_kitti(y, id_map):
y_kitti = np.zeros(y.shape, dtype=y.dtype)
for i in range(len(id_map)):
#print(i , ' --> ', id_map[i])
#print(np.equal(y, i).sum(), '\n')
#print('sum')
#print(y[np.equal(y, i)].sum())
y_kitti[np.equal(y, i)] = id_map[i]
#print(np.equal(y, id_map[i]).sum())
#y[np.equal(y, i)] = 2
#print(y[np.equal(y, i)].sum())
return y_kitti
#def plot_accuracy(fig, data):
# x_data = np.linspace(1, len(data), len(data))
# plt.figure(fig.number)
# plt.clf()
# plt.plot(x_data, data, 'b-')
# plt.savefig(str(fig.number) + '_plot.pdf', bbox_inches='tight')
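# A small, hedged worked example for compute_errors (the 2-class confusion
# matrix and class names below are hypothetical; rows are predictions and
# columns are ground truth, matching collect_confusion_matrix above).
if __name__ == '__main__':
    toy_conf = np.array([[3., 1.],
                         [0., 2.]])
    # compute_errors only reads index 3 (the class name) of each entry
    toy_info = [(0, 0, 0, 'class_a'), (1, 0, 0, 'class_b')]
    acc, iou, recall, precision, size = compute_errors(
        toy_conf, 'toy', toy_info, verbose=False)
    assert abs(acc - 500.0 / 6.0) < 1e-9      # pixel acc = trace / total = 5/6
    assert abs(iou - 425.0 / 6.0) < 1e-9      # mean IoU of 75% and 66.67%
    assert abs(precision - 87.5) < 1e-9       # mean precision of 75% and 100%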
|
bsd-3-clause
|
jseabold/statsmodels
|
statsmodels/tsa/base/prediction.py
|
4
|
6163
|
import pandas as pd
import numpy as np
from scipy import stats
class PredictionResults(object):
"""
Prediction results
Parameters
----------
predicted_mean : {ndarray, Series, DataFrame}
The predicted mean values
var_pred_mean : {ndarray, Series, DataFrame}
The variance of the predicted mean values
dist : {None, "norm", "t", rv_frozen}
The distribution to use when constructing prediction intervals.
Default is normal.
df : int, optional
        The degrees of freedom parameter for the t distribution. Required when
        ``dist`` is "t" and must be None otherwise.
row_labels : {Sequence[Hashable], pd.Index}
Row labels to use for the summary frame. If None, attempts to read the
index of ``predicted_mean``
"""
def __init__(
self,
predicted_mean,
var_pred_mean,
dist=None,
df=None,
row_labels=None,
):
self._predicted_mean = np.asarray(predicted_mean)
self._var_pred_mean = np.asarray(var_pred_mean)
self._df = df
self._row_labels = row_labels
if row_labels is None:
self._row_labels = getattr(predicted_mean, "index", None)
self._use_pandas = self._row_labels is not None
if dist != "t" and df is not None:
raise ValueError('df must be None when dist is not "t"')
if dist is None or dist == "norm":
self.dist = stats.norm
self.dist_args = ()
elif dist == "t":
self.dist = stats.t
self.dist_args = (self._df,)
elif isinstance(dist, stats.distributions.rv_frozen):
self.dist = dist
self.dist_args = ()
else:
            raise ValueError('dist must be None, "norm", "t" or a frozen '
                             'scipy.stats distribution.')
def _wrap_pandas(self, value, name=None, columns=None):
if not self._use_pandas:
return value
if value.ndim == 1:
return pd.Series(value, index=self._row_labels, name=name)
return pd.DataFrame(value, index=self._row_labels, columns=columns)
@property
def row_labels(self):
"""The row labels used in pandas-types."""
return self._row_labels
@property
def predicted_mean(self):
"""The predicted mean"""
return self._wrap_pandas(self._predicted_mean, "predicted_mean")
@property
def var_pred_mean(self):
"""The variance of the predicted mean"""
if self._var_pred_mean.ndim > 2:
return self._var_pred_mean
return self._wrap_pandas(self._var_pred_mean, "var_pred_mean")
@property
def se_mean(self):
"""The standard deviation of the predicted mean"""
ndim = self._var_pred_mean.ndim
if ndim == 1:
values = np.sqrt(self._var_pred_mean)
elif ndim == 3:
values = np.sqrt(self._var_pred_mean.T.diagonal())
else:
            raise NotImplementedError("var_pred_mean must be 1 or 3 dim")
return self._wrap_pandas(values, "mean_se")
@property
def tvalues(self):
"""The ratio of the predicted mean to its standard deviation"""
val = self.predicted_mean / self.se_mean
if isinstance(val, pd.Series):
val.name = "tvalues"
return val
def t_test(self, value=0, alternative="two-sided"):
"""
z- or t-test for hypothesis that mean is equal to value
Parameters
----------
value : array_like
value under the null hypothesis
alternative : str
'two-sided', 'larger', 'smaller'
Returns
-------
stat : ndarray
test statistic
pvalue : ndarray
p-value of the hypothesis test, the distribution is given by
the attribute of the instance, specified in `__init__`. Default
if not specified is the normal distribution.
"""
# assumes symmetric distribution
stat = (self.predicted_mean - value) / self.se_mean
if alternative in ["two-sided", "2-sided", "2s"]:
pvalue = self.dist.sf(np.abs(stat), *self.dist_args) * 2
elif alternative in ["larger", "l"]:
pvalue = self.dist.sf(stat, *self.dist_args)
elif alternative in ["smaller", "s"]:
pvalue = self.dist.cdf(stat, *self.dist_args)
else:
raise ValueError("invalid alternative")
return stat, pvalue
def conf_int(self, alpha=0.05):
"""
Confidence interval construction for the predicted mean.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the prediction interval.
The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
pi : {ndarray, DataFrame}
The array has the lower and the upper limit of the prediction
interval in the columns.
"""
se = self.se_mean
q = self.dist.ppf(1 - alpha / 2.0, *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
ci = np.column_stack((lower, upper))
if self._use_pandas:
return self._wrap_pandas(ci, columns=["lower", "upper"])
return ci
def summary_frame(self, alpha=0.05):
"""
Summary frame of mean, variance and confidence interval.
Returns
-------
DataFrame
DataFrame containing four columns:
* mean
* mean_se
* mean_ci_lower
* mean_ci_upper
Notes
-----
        The default ``alpha`` of 0.05 produces a confidence interval with
        95% coverage.
"""
ci_mean = np.asarray(self.conf_int(alpha=alpha))
lower, upper = ci_mean[:, 0], ci_mean[:, 1]
to_include = {
"mean": self.predicted_mean,
"mean_se": self.se_mean,
"mean_ci_lower": lower,
"mean_ci_upper": upper,
}
return pd.DataFrame(to_include)
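# A minimal, hedged usage sketch of PredictionResults (the numbers below are
# illustrative, not statsmodels output); guarded so it only runs when this
# module is executed directly.
if __name__ == "__main__":
    pred = pd.Series([1.0, 2.0, 3.0], name="predicted_mean")
    var = np.array([0.25, 0.25, 0.25])
    res = PredictionResults(pred, var, dist="norm")
    ci = res.conf_int(alpha=0.05)   # mean +/- ~1.96 * sqrt(var), as a DataFrame
    frame = res.summary_frame()     # mean, mean_se, mean_ci_lower, mean_ci_upper
    assert list(frame.columns) == [
        "mean", "mean_se", "mean_ci_lower", "mean_ci_upper"
    ]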
|
bsd-3-clause
|
Myasuka/scikit-learn
|
sklearn/cross_validation.py
|
96
|
58309
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds), except the last one,
    which also receives the remaining samples.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
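    Examples
    --------
    A minimal usage sketch (hedged: the iris data and the linear SVC below are
    illustrative and not part of this module; predictions depend on the folds).

    >>> from sklearn import datasets, svm, cross_validation
    >>> iris = datasets.load_iris()
    >>> clf = svm.SVC(kernel='linear', C=1)
    >>> preds = cross_validation.cross_val_predict(
    ...     clf, iris.data, iris.target, cv=5)  # doctest: +SKIP
    >>> preds.shape  # doctest: +SKIP
    (150,)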
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
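    Examples
    --------
    A minimal usage sketch (hedged: the iris data and the linear SVC below are
    illustrative and not part of this module; actual scores depend on the data
    and the splits).

    >>> from sklearn import datasets, svm, cross_validation
    >>> iris = datasets.load_iris()
    >>> clf = svm.SVC(kernel='linear', C=1)
    >>> scores = cross_validation.cross_val_score(
    ...     clf, iris.data, iris.target, cv=5)  # doctest: +SKIP
    >>> scores.shape  # doctest: +SKIP
    (5,)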
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
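# Layout of the per-fold list returned above, for reference: with the default flags it
# is [test_score, n_test_samples, scoring_time]; return_train_score=True prepends the
# train score and return_parameters=True appends the parameter dict. cross_val_score()
# keeps only scores[:, 0] from each fold, i.e. the test score.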
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
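if __name__ == "__main__":
    # Sketch of the np.ix_ slicing used above for precomputed kernels (illustrative):
    # a test block keeps the test *rows* but the train *columns*, matching what the
    # estimator saw at fit time.
    import numpy as np
    K = np.arange(25, dtype=float).reshape(5, 5)          # stand-in for a kernel matrix
    _train, _test = np.array([0, 1, 2]), np.array([3, 4])
    print(K[np.ix_(_train, _train)].shape)                # (3, 3), used for fitting
    print(K[np.ix_(_test, _train)].shape)                 # (2, 3), used for scoring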
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another
        object that will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
    checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
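if __name__ == "__main__":
    # Minimal sketch (illustrative): with an int, classifiers with binary/multiclass
    # targets get a StratifiedKFold, everything else gets a plain KFold.
    import numpy as np
    from sklearn.cross_validation import check_cv as _check_cv
    _y = np.array([0, 1, 0, 1, 0, 1])
    print(_check_cv(3, X=np.zeros((6, 2)), y=_y, classifier=True))   # StratifiedKFold
    print(_check_cv(3, X=np.zeros((6, 2)), y=_y, classifier=False))  # KFold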
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
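# Worked example of the p-value above: if 4 of 100 permutation scores are >= the true
# score, pvalue = (4 + 1) / (100 + 1) ~= 0.0495. The "+ 1" terms count the unpermuted
# score itself as one permutation, so the reported p-value can never be exactly zero.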
permutation_test_score.__test__ = False # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
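if __name__ == "__main__":
    # Minimal sketch (illustrative): `stratify` keeps the class balance of y in both
    # halves of the split.
    import numpy as np
    from sklearn.cross_validation import train_test_split as _tts
    _y = np.array([0] * 80 + [1] * 20)
    _X = np.arange(100).reshape(100, 1)
    _X_tr, _X_te, _y_tr, _y_te = _tts(_X, _y, test_size=0.25, stratify=_y,
                                      random_state=0)
    print(np.bincount(_y_tr), np.bincount(_y_te))  # roughly [60 15] and [20  5]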
|
bsd-3-clause
|
JeanKossaifi/scikit-learn
|
examples/exercises/plot_cv_digits.py
|
232
|
1206
|
"""
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
|
bsd-3-clause
|
basaks/PyKrige
|
setup.py
|
1
|
5625
|
from __future__ import absolute_import
from __future__ import print_function
"""
Updated BSM 10/23/2015
Cython extensions work-around adapted from simplejson setup script:
https://github.com/simplejson/simplejson/blob/0bcdf20cc525c1343b796cb8f247ea5213c6557e/setup.py#L110
"""
import sys
from os.path import join
from setuptools import setup, Extension
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
NAME = 'PyKrige'
VERSION = '1.4.dev0'
AUTHOR = 'Benjamin S. Murphy'
EMAIL = '[email protected]'
URL = 'https://github.com/bsmurphy/PyKrige'
DESC = 'Kriging Toolkit for Python'
LDESC = 'PyKrige is a kriging toolkit for Python that supports two- and ' \
'three-dimensional ordinary and universal kriging.'
PACKAGES = ['pykrige']
PCKG_DAT = {'pykrige': ['README.md', 'CHANGELOG.md', 'LICENSE.txt', 'MANIFEST.in',
join('test_data', '*.txt'), join('test_data', '*.asc')]}
REQ = ['numpy', 'scipy', 'matplotlib']
for req in REQ:
try:
__import__(req)
except ImportError:
print("**************************************************")
print("Error: PyKrige relies on the installation of the SciPy stack "
"(Numpy, SciPy, matplotlib) to work. "
"For instructions for installation, please view "
"https://www.scipy.org/install.html."
"\n {} missing".format(req)
)
print("**************************************************")
raise
sys.exit(1)
# python setup.py install goes through REQ in the reverse order from pip
CLSF = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: GIS']
# Removed python 3 switch from here
try:
from Cython.Distutils import build_ext
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = False
try_cython = True
except ImportError:
print("**************************************************")
print("WARNING: Cython is not currently installed. "
"Falling back to pure Python implementation.")
print("**************************************************")
try_cython = False
class BuildFailed(Exception):
pass
# This is how I was originally trying to get around the Cython extension troubles...
# Keeping it here for reference...
#
# class BuildExtCompilerCheck(build_ext):
# def build_extensions(self):
# if sys.platform == 'win32' and ('MSC' in sys.version or 'MSVC' in sys.version):
# print("-> COMPILER IS", self.compiler.compiler_type)
# from distutils.msvccompiler import MSVCCompiler
# if isinstance(self.compiler, MSVCCompiler):
# build_ext.build_extensions(self)
# else:
# print("WARNING: The C extensions will not be built since the necessary compiler could not be found.\n"
# "See https://github.com/bsmurphy/PyKrige/issues/8")
# else:
# build_ext.build_extensions(self)
def run_setup(with_cython):
if with_cython:
import numpy as np
if sys.platform != 'win32':
compile_args = dict(extra_compile_args=['-O2', '-march=core2', '-mtune=corei7'],
extra_link_args=['-O2', '-march=core2', '-mtune=corei7'])
else:
compile_args = {}
ext_modules = [Extension("pykrige.lib.cok", ["pykrige/lib/cok.pyx"], **compile_args),
Extension("pykrige.lib.variogram_models", ["pykrige/lib/variogram_models.pyx"], **compile_args)]
        # Transferred Python 3 switch here. On Python 3 machines, lapack_py3.pyx is used
# instead of lapack.pyx to build .lib.lapack
if sys.version_info[0] == 3:
ext_modules += [Extension("pykrige.lib.lapack", ["pykrige/lib/lapack_py3.pyx"], **compile_args)]
else:
ext_modules += [Extension("pykrige.lib.lapack", ["pykrige/lib/lapack.pyx"], **compile_args)]
class TryBuildExt(build_ext):
def build_extensions(self):
try:
build_ext.build_extensions(self)
except ext_errors:
print("**************************************************")
print("WARNING: Cython extensions failed to build. Falling back to pure Python implementation.\n"
"See https://github.com/bsmurphy/PyKrige/issues/8 for more information.")
print("**************************************************")
raise BuildFailed()
cmd = {'build_ext': TryBuildExt}
setup(name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, url=URL, description=DESC,
long_description=LDESC, packages=PACKAGES, package_data=PCKG_DAT, classifiers=CLSF,
ext_modules=ext_modules, include_dirs=[np.get_include()], cmdclass=cmd)
else:
setup(name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, url=URL, description=DESC,
long_description=LDESC, packages=PACKAGES, package_data=PCKG_DAT, classifiers=CLSF)
try:
run_setup(try_cython)
except BuildFailed:
run_setup(False)
|
bsd-3-clause
|
ywcui1990/nupic.research
|
projects/sp_paper/plot_nyc_taxi_performance.py
|
6
|
3556
|
from matplotlib import pyplot as plt
from htmresearch.support.sequence_learning_utils import *
import pandas as pd
from pylab import rcParams
from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
rcParams.update({'ytick.labelsize': 8})
rcParams.update({'figure.figsize': (12, 6)})
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
window = 960
skipTrain = 10000
figPath = './result/'
def getDatetimeAxis():
"""
use datetime as x-axis
"""
dataSet = 'nyc_taxi'
filePath = './data/' + dataSet + '.csv'
data = pd.read_csv(filePath, header=0, skiprows=[1, 2],
names=['datetime', 'value', 'timeofday', 'dayofweek'])
xaxisDate = pd.to_datetime(data['datetime'])
return xaxisDate
def computeAltMAPE(truth, prediction, startFrom=0):
return np.nanmean(np.abs(truth[startFrom:] - prediction[startFrom:]))/np.nanmean(np.abs(truth[startFrom:]))
def computeNRMSE(truth, prediction, startFrom=0):
squareDeviation = computeSquareDeviation(prediction, truth)
squareDeviation[:startFrom] = None
return np.sqrt(np.nanmean(squareDeviation))/np.nanstd(truth)
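# Quick numeric illustration of the two metrics above (made-up values, not experiment
# output): with truth = [100, 200, 300] and prediction = [110, 190, 330], the absolute
# errors are [10, 10, 30], so computeAltMAPE = mean(|err|) / mean(|truth|)
# = (50/3) / 200 ~= 0.083, while computeNRMSE instead normalizes the RMSE of the
# errors by the standard deviation of the truth.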
def loadExperimentResult(filePath):
expResult = pd.read_csv(filePath, header=0, skiprows=[1, 2],
names=['step', 'value', 'prediction5'])
groundTruth = np.roll(expResult['value'], -5)
prediction5step = np.array(expResult['prediction5'])
return (groundTruth, prediction5step)
if __name__ == "__main__":
xaxisDate = getDatetimeAxis()
### Figure 2: Continuous LSTM with different window size
fig = plt.figure()
dataSet = 'nyc_taxi'
classifierType = 'SDRClassifierRegion'
filePath = './prediction/' + dataSet + '_TM_pred.csv'
trainSPList = [False, True, True]
boost = [1, 1, 10]
mapeTMList1 = []
mapeTMList2 = []
for i in range(len(boost)):
tmPrediction = np.load(
'./results/nyc_taxi/{}{}TMprediction_SPLearning_{}_boost_{}.npz'.format(
dataSet, classifierType, trainSPList[i], boost[i]))
tmPredictionLL = tmPrediction['arr_0']
tmPointEstimate = tmPrediction['arr_1']
tmTruth = tmPrediction['arr_2']
# encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
# negLL = computeLikelihood(tmPredictionLL, tmTruth, encoder)
# negLL[:50] = None
# negLLTM = plotAccuracy((negLL, xaxisDate), tmTruth,
# window=window, errorType='negLL', label='HTM_trainSP_{}'.format(trainSP))
#
absDiff = np.abs(tmTruth - tmPointEstimate)
mapeTM = plotAccuracy((absDiff, xaxisDate),
tmTruth,
skipRecordNum = 20,
window=window,
errorType='mape',
label='TrainSP_{}, r={}'.format(trainSPList[i], boost[i]))
normFactor = np.nanmean(np.abs(tmTruth))
mapeTMList1.append(np.nanmean(mapeTM[:10000]) / normFactor)
mapeTMList2.append(np.nanmean(mapeTM[10000:]) / normFactor)
altMAPETM = computeAltMAPE(tmTruth, tmPointEstimate, 10000)
print "trainSP {} MAPE {}".format(trainSPList[i], altMAPETM)
plt.legend()
plt.savefig('figures/nyc_taxi_performance.pdf')
rcParams.update({'figure.figsize': (4, 6)})
plt.figure()
plt.subplot(221)
plt.bar(range(3), mapeTMList1)
plt.ylim([0, .14])
plt.subplot(222)
plt.bar(range(3), mapeTMList2)
plt.ylim([0, .14])
plt.savefig('figures/nyc_taxi_performance_summary.pdf')
|
agpl-3.0
|
dawenl/stochastic_PMF
|
code/HartiganOnline.py
|
1
|
2619
|
#!/usr/bin/env python
import numpy as np
from sklearn.base import BaseEstimator
class HartiganOnline(BaseEstimator):
'''Online Hartigan clustering.'''
def __init__(self, n_clusters=2, max_iter=10, shuffle=True, verbose=False):
'''Initialize a Hartigan clusterer.
:parameters:
- n_clusters : int
Number of clusters
- max_iter : int
Maximum number of passes through the data
- shuffle : bool
Shuffle the data between each pass
- verbose : bool
Display debugging output?
:variables:
- cluster_centers_ : ndarray, shape=(n_clusters, d)
Estimated cluster centroids
- cluster_sizes_ : ndarray, shape=(n_clusters)
Size (number of points) for each cluster
'''
self.n_clusters = n_clusters
self.max_iter = max_iter
self.shuffle = shuffle
self.verbose = verbose
self.cluster_sizes_ = np.zeros(self.n_clusters)
def fit(self, X):
'''Fit the cluster centers.
:parameters:
- X : ndarray, size=(n, d)
The data to be clustered
'''
n, d = X.shape
# Initialize the cluster centers, costs, sizes
self.cluster_centers_ = np.zeros( (self.n_clusters, d), dtype=X.dtype)
step = 0
idx = np.arange(n)
while step < self.max_iter:
step = step + 1
# Should we shuffle the data?
if self.shuffle:
np.random.shuffle(idx)
self.partial_fit(X[idx])
def partial_fit(self, X):
'''Partial fit the cluster centers'''
n, d = X.shape
if not hasattr(self, 'cluster_centers_'):
self.cluster_centers_ = np.zeros( (self.n_clusters, d), dtype=X.dtype)
balances = self.cluster_sizes_ / (1.0 + self.cluster_sizes_)
norms = np.sum(self.cluster_centers_**2, axis=1)
for xi in X:
# Get the closest cluster center
j = np.argmin(balances * (np.sum(xi**2) + norms - 2 * self.cluster_centers_.dot(xi)))
# Update the center
self.cluster_centers_[j] = (self.cluster_sizes_[j] * self.cluster_centers_[j] + xi) / (1.0 + self.cluster_sizes_[j])
# Update the counter
self.cluster_sizes_[j] += 1.0
# Update the balance
balances[j] = self.cluster_sizes_[j] / (1.0 + self.cluster_sizes_[j])
# Update the norms
norms[j] = np.sum(self.cluster_centers_[j]**2)
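if __name__ == '__main__':
    # Minimal usage sketch (illustrative): two well-separated blobs should end up with
    # one centroid near each blob; cluster sizes accumulate over the max_iter passes.
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    clusterer = HartiganOnline(n_clusters=2, max_iter=5)
    clusterer.fit(X_demo)
    print(clusterer.cluster_centers_)
    print(clusterer.cluster_sizes_)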
|
gpl-3.0
|
dnut/associations
|
associations/analysis.py
|
1
|
12742
|
from itertools import chain
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
import os
import textwrap
from .libassoc import invert, istr, iint, pretty, make_dir
class Analysis():
""" Ideally this class would implement totally generic methods
that can be used to analyze any Histogram() and Associations()
objects. This class is an attempt at that but it is still
somewhat specialized and may need adaptation for general use.
"""
def __init__(self, histogram, assoc, output_dir='output', plot_format='pdf'):
self.hist = histogram
self.assoc = assoc
self.plot_counter = 0
self.gen_assoc = {}
self.output_dir = output_dir
self.plot_dir = make_dir(output_dir, 'plots')
self.maxes = [('', 1), ('', 1), ('', 1)]
self.mins = [('', 1), ('', 1), ('', 1)]
self.time = ('age', 'weekday', 'season')
self.plot_format = plot_format
def percent(self, count, total=65499):
""" Basic percent calculator outputting nice string """
return istr(100*count/total) + '%'
def percentify(self, pairs, total=65499):
""" Use on the list of tuples to convert ratios to percentages """
return tuple((a, self.percent(b, total=total)) for (a, b) in pairs)
def bin_sort(self, bins):
""" Sort bins according to value of longest int(string[0:?]) """
def value(s):
i = iint(s[0]) if isinstance(s, tuple) else iint(s)
return i if i != None else float('inf')
return sorted(bins, key=value)
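    # Example (assuming iint() parses a leading integer and returns None otherwise):
    # bins like ['10-19', '2-9', 'unknown'] come back as ['2-9', '10-19', 'unknown'],
    # since non-numeric bins map to float('inf') and therefore sort last.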
def most_common(self, test_group, *subpop):
""" Rank test_group by greatest occurrence within subpop. """
fields = [self.hist.field_index[val] for val in subpop]
hist = self.hist.simplify(test_group, *fields)
data = [
(test, hist.get(test, *subpop))
for test in hist.valists[hist.fieldict[test_group]]
]
return self.percentify(
sorted(data, key=lambda x: -x[1]),
total=self.hist.get(*subpop)
)
def most_assoc(self, test_group, value, *subpop):
""" Rank test_group by greatest association with value within
subpop.
"""
data, a = [], self.assoc.report(test_group, value)
for assoc in a:
mutable = set(assoc)
mutable.remove(value)
data.append((''.join(mutable), a[assoc][frozenset()]))
return sorted(data, key=lambda x: -x[1])
def extremes(self):
""" Find absolute most associations within all associations
for both specific field values as well as generic field types.
Return data in format that can be easily interpreted by
AsciiTable().
"""
gens = sorted([(k, self.gen_assoc[k]) for k in self.gen_assoc],
key=lambda x: x[1])
fmat = lambda x: [(', '.join(s[:30] for s in i[0]), i[1]) for i in x]
gl, gm = fmat(gens[:3]), fmat(gens[:-4:-1])
sl, sm = fmat(self.mins), fmat(self.maxes)
return (('Generic Associations',
('Most Associated', gm),
('Least Associated', gl)),
('Specific Associations',
('Most Associated', sm),
('Least Associated', sl)))
def prep_hist(self, field, other_field, notable=1, subpop=''):
""" Filter out irrelevant data for plotting """
bins = self.hist.valdicts_dict[field]
associations = self.assoc.report(field, other_field)
keep_of = set()
if field in self.time:
# Keep all bins for time
keep_of = set(self.hist.valists_dict[field])
else:
# For non-time, remove excess bins without notable data
for key in associations:
try:
ratio = associations[key][frozenset(subpop)]
except KeyError:
continue
# Only keep notable bins
if ratio >= notable or ratio <= 1/float(notable):
for myfield in key:
typ = self.hist.field_index[myfield]
if typ == field:
keep_of.add(myfield)
# Don't include mis-entered data (eg. ages > 200 years)
try:
keep_of.remove('typo')
except KeyError:
pass
# Sort bins (eg. time related such as ages)
bins = self.bin_sort([s for s in bins if s in keep_of])
return bins, associations
def make_hist(self, field, other_field, notable=1, subpop=''):
""" This method creates the data structure for a histogram plot
given the names of the fields we want to compare. This is done
        manually because we are working with already existing bins.
This method is long, but it is primarily a single cohesive
loop that serves a single purpose.
field: x-axis
other_field: legend
notable: threshold for ratio of notable data (also inverse)
"""
keep_f, values, hists = set(), [], []
skip, top = False, 0
bins, associations = self.prep_hist(
field, other_field, notable, subpop
)
bindex = invert(bins)
empty_things = np.zeros(len(bins))
# Traverse each association to find and add notable data
for key in associations:
# For each of the two items per association
for myfield in key:
if myfield == 'typo':
# Typo ages not useful
skip = True
# Determine whether item is from field or other_field
typ = self.hist.field_index[myfield]
if typ == other_field:
# Legend fields
value = myfield
# Only keep notable legend fields
if myfield not in values:
values.append(myfield)
hists.append(copy(empty_things))
elif myfield not in bins:
# Keep out unwanted bins
skip = True
else: # Bin fields
actual = myfield
# Gather association ratio for combination
try:
ratio = associations[key][frozenset(subpop)]
except KeyError:
skip = True
if skip == True:
skip = False
continue
# Record highest value among all data
top = max(ratio, top)
# index i used in original histogram
index = values.index(value)
if ratio >= notable or ratio <= 1/float(notable):
# Save index for this field so we keep its data in the end
keep_f.add(index)
try:
# Record the ratio if we decided it is notable above
hists[index][bindex[actual]] = ratio
except KeyError:
pass
# Filter out data which is not notable
keepers = [value for i, value in enumerate(hists) if i in keep_f]
new_values = [value for i, value in enumerate(values) if i in keep_f]
return bins, new_values, top, keepers
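    # Shape of the tuple returned above, for a hypothetical ('age', 'season') pair:
    #   bins       -> sorted x-axis labels, e.g. ['0-9', '10-19', ...]
    #   new_values -> legend entries that contributed at least one notable ratio
    #   top        -> largest ratio seen (plot_assoc switches to a log scale when > 10)
    #   keepers    -> one ndarray of ratios per kept legend entry, aligned with bins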
def plot_hist(self, title, xlabel, ylabel, bins, ds_names,
*data_sets, log=False, legend=True):
""" Plot pre-binned histogram. """
ds_names = [
name if len(name) < 60 else name[:59] + '...' for name in ds_names
]
colors = ('b', 'r', 'g', 'c', 'm', 'y', 'k', 'w')
n_sets = len(data_sets)
if n_sets == 0:
return
if n_sets == 1:
colors = 'g'
fit = 0.7 if n_sets > 1 else 1
width = fit/n_sets
plt.figure(self.plot_counter)
index = np.arange(len(data_sets[0]))
for i, data_set in enumerate(data_sets):
plt.bar(index + i * width, data_set, width, alpha=0.4,
color=colors[i % 8], label=ds_names[i])
plt.title(title); plt.xlabel(xlabel); plt.ylabel(ylabel)
# Use log base 10 scale
if log:
plt.yscale('log')
wrap_width = int(250/len(data_sets[0]))
plt.xticks(index + fit/2, [textwrap.fill(s, wrap_width) for s in bins])
plt.tight_layout()
# Horizontal dotted line at 1
plt.axes().plot([-0.05, len(data_sets[0]) - 0.25], [1, 1], "k--")
if legend:
plt.legend()
#plt.plot()
self.plot_counter += 1
def nice_plot_assoc(self, one, two, title=False, xlabel=False,
bins=False, notable=1.5, subpop='', force=False):
""" Try plot with arbitrary limitation first, change if needed. """
while notable > 1:
# This means floats such as 0.9999999999999997 will
# be excluded, but we don't want < 1.1 anyway.
bad = self.plot_assoc(
one, two, title, xlabel, bins, notable, subpop
)
if bad == 'high':
notable += 0.1
elif bad == 'low':
notable -= 0.1
elif bad == None:
break
else:
raise RuntimeError
else:
if force:
self.plot_assoc(
one, two, title, xlabel, bins, notable, subpop, force=True
)
def savefig(self, name):
fig = plt.gcf()
fig.set_size_inches(25, 15)
fig.savefig(
os.path.join(self.plot_dir, name + '.' + self.plot_format),
bbox_inches='tight',
dpi=100
)
def plot_assoc(self, one, two, title=False, xlabel=False,
bins=False, notable=2, subpop='', force=False):
""" Plot associations between values one and two. Extract a
more complete data set from the histogram and make the plot.
"""
One, Two = one.capitalize(), two.capitalize()
if not xlabel:
xlabel = one.capitalize() + 's'
if not title:
title = 'Associations between {} and {}'.format(One, Two)
if not bins:
bins = self.hist.valists_dict[one]
ylabel = 'Association Ratio'
bins, names, top, data = self.make_hist(
one, two, notable, subpop=subpop
)
log = True if top > 10 else False
if not force:
if len(data) < 2 or len(data[0]) < 2:
return 'low'
if len(data) > 8:
return 'high'
self.plot_hist(title, xlabel, ylabel, bins, names, *data, log=log)
name = one +', '+ two + (' for ' + istr(subpop) if subpop else '')
self.savefig(name)
def max_helper(self, one, two):
""" Record most associated specific pairs within the generic
one two combination and find mean for one two combo.
"""
c, tot, val = 0, 0, 1
search = self.assoc.report(one, two)
for combo in search:
try:
val = search[combo][frozenset()]
except KeyError:
continue
# Find specific maxes
for big_one in self.maxes:
if val > big_one[1]:
self.maxes.append((combo, val))
self.maxes = sorted(self.maxes, key=lambda x: -x[1])[:3]
break
# Find specific mins
for small_one in self.mins:
if val < small_one[1]:
self.mins.append((combo, val))
self.mins = sorted(self.mins, key=lambda x: x[1])[:3]
break
# Standardize so anti-associations are equally represented
if val < 1: val = 1/val
# To do: Improve by weighting by occurrences.
c += 1
tot += val
# Record average association for combo type
self.gen_assoc[frozenset((one, two))] = tot/c
def plot_all(self):
""" Plot the associations between every possible field pair. """
memo = set()
h = self.hist
for one in self.hist.fieldict:
for two in self.hist.fieldict:
# Plots must meet these conditions:
# - one and two must be different.
# - Times should be bins only unless both bin + legend are times
# - Group of bins is larger than legend unless bins are time.
# - If both bins and legend are times, larger set is bins.
ugly_sizes = len(h.valists[h.fieldict[two]]) > \
len(h.valists[h.fieldict[one]])
if one == two \
or (
one not in self.time
and (two in self.time or ugly_sizes)
) or (
one in self.time
and two in self.time
and ugly_sizes
):
continue
self.nice_plot_assoc(one, two)
# For efficiency, use opportunity to run through nested loops
# to determine how associated each field pair is overall.
self.max_helper(one, two)
class AsciiTable():
""" This grew out of a need to portray specific data that was
found using this module. Originally, it was just one method in
Analysis(), but that no longer seemed appropriate so it has been
split off. It needs some work to become friendly for general use.
"""
def __init__(self):
self.tables = []
def __str__(self):
return '\n\n'.join(self.tables)
def add_table(self, *args):
""" Create an ascii table out of sections from self.table_section()
"""
formatted = tuple(chain(*[self.table_section(*a) for a in args]))
tuples = [(x[0], istr(x[1])) for x in formatted if isinstance(x, tuple)]
lens = [len(max(i, key=len)) for i in tuple(zip(*tuples))]
tlen = sum(lens)
t, hb = [], '─'*(tlen + 3)
t.append('┌' + hb + '┐')
for item in formatted:
if item == 'hb' and len(t) == 1:
continue
if item == 'hb':
t.append('├' + hb + '┤')
elif isinstance(item, tuple):
pad = [(lens[i] - len(istr(item[i])) + 1) * ' ' for i in (0, 1)]
t.append(('│{}{}│{}{}│').format(item[0], pad[0],
pad[1], istr(item[1])))
else:
this_pad = (tlen+3 - len(item))
if item[:2] == '==':
l = ' '*int(this_pad / 2)
r = ' '*(this_pad % 2) + l
t.append('│' + l + item + r + '│')
else:
t.append('│' + item + ' '*(tlen+3 - len(item)) + '│')
t.append('└' + hb + '┘')
table = '\n'.join(t)
self.tables.append(table)
return '\n'.join(t)
def table_section(self, title, *subsects):
""" Create a table section from question output that can be
        properly formatted by self.add_table().
For multiple subsects:
subsect[0]: subsect title
subsect[1]: subsect data
"""
h = 'hb'
ret = (h, '== ' + title + ' ==', h)
if len(subsects) == 1:
for line in subsects[0]:
ret += (line,)
ret += (h,)
else:
for subsect in subsects:
ret += (subsect[0], h)
for line in subsect[1]:
ret += (line,)
ret += (h,)
return ret
|
gpl-3.0
|
shenzebang/scikit-learn
|
sklearn/tests/test_pipeline.py
|
162
|
14875
|
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
|
bsd-3-clause
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/examples/pylab_examples/demo_annotation_box.py
|
3
|
2571
|
import matplotlib.pyplot as plt
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, \
AnnotationBbox
from matplotlib.cbook import get_sample_data
import numpy as np
if 1:
fig = plt.gcf()
fig.clf()
ax = plt.subplot(111)
offsetbox = TextArea("Test 1", minimumdescent=False)
xy = (0.5, 0.7)
ax.plot(xy[0], xy[1], ".r")
ab = AnnotationBbox(offsetbox, xy,
xybox=(-20, 40),
xycoords='data',
boxcoords="offset points",
arrowprops=dict(arrowstyle="->"))
ax.add_artist(ab)
offsetbox = TextArea("Test", minimumdescent=False)
ab = AnnotationBbox(offsetbox, xy,
xybox=(1.02, xy[1]),
xycoords='data',
boxcoords=("axes fraction", "data"),
box_alignment=(0.,0.5),
arrowprops=dict(arrowstyle="->"))
ax.add_artist(ab)
from matplotlib.patches import Circle
da = DrawingArea(20, 20, 0, 0)
p = Circle((10, 10), 10)
da.add_artist(p)
xy = [0.3, 0.55]
ab = AnnotationBbox(da, xy,
xybox=(1.02, xy[1]),
xycoords='data',
boxcoords=("axes fraction", "data"),
box_alignment=(0.,0.5),
arrowprops=dict(arrowstyle="->"))
#arrowprops=None)
ax.add_artist(ab)
arr = np.arange(100).reshape((10,10))
im = OffsetImage(arr, zoom=2)
ab = AnnotationBbox(im, xy,
xybox=(-50., 50.),
xycoords='data',
boxcoords="offset points",
pad=0.3,
arrowprops=dict(arrowstyle="->"))
#arrowprops=None)
ax.add_artist(ab)
# another image
from matplotlib._png import read_png
fn = get_sample_data("lena.png", asfileobj=False)
arr_lena = read_png(fn)
imagebox = OffsetImage(arr_lena, zoom=0.2)
ab = AnnotationBbox(imagebox, xy,
xybox=(120., -80.),
xycoords='data',
boxcoords="offset points",
pad=0.5,
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=3")
)
ax.add_artist(ab)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.draw()
plt.show()
|
gpl-2.0
|
jacquev6/Pynamixel
|
doc/conf.py
|
1
|
2155
|
# coding: utf-8
# Copyright 2015 Vincent Jacques <[email protected]>
project = "Pynamixel"
author = '<a href="http://vincent-jacques.net/contact">Vincent Jacques</a>'
copyright = ('2015-2018 {} <script>var jacquev6_ribbon_github="{}"</script>'.format(author, project) +
'<script src="https://jacquev6.github.io/ribbon.js"></script>')
master_doc = "index"
extensions = []
nitpicky = True
# https://github.com/bitprophet/alabaster
html_sidebars = {
"**": ["about.html", "navigation.html", "searchbox.html"],
}
html_theme_options = {
"github_user": "jacquev6",
"github_repo": project,
"travis_button": True,
}
# @todoc html_logo
# http://sphinx-doc.org/ext/autodoc.html
extensions.append("sphinx.ext.autodoc")
autodoc_member_order = "bysource"
autodoc_default_flags = ["members"]
add_module_names = False
add_class_names = False
# http://sphinx-doc.org/ext/githubpages.html
extensions.append("sphinx.ext.githubpages")
# http://sphinx-doc.org/ext/doctest.html
extensions.append("sphinx.ext.doctest")
doctest_global_setup = """
# This setup is for README.rst's doctests.
# @todo Find a way to put that into actual doctests. See the message of the commit that introduced this line.
import MockMockMock
import Pynamixel
hardware_mock = MockMockMock.Engine().create("hardware")
hardware = hardware_mock.object
hardware_mock.expect.send([0xFF, 0xFF, 0x01, 0x05, 0x03, 0x1E, 0x00, 0x02, 0xD6])
hardware_mock.expect.receive(4).and_return([0xFF, 0xFF, 0x01, 0x02])
hardware_mock.expect.receive(2).and_return([0x00, 0xFC])
hardware_mock.expect.send([0xFF, 0xFF, 0x01, 0x04, 0x02, 0x2E, 0x01, 0xC9]).and_return(None)
hardware_mock.expect.receive(4).and_return([0xFF, 0xFF, 0x01, 0x03])
hardware_mock.expect.receive(3).and_return([0x00, 0x01, 0xFA])
"""
# http://sphinx-doc.org/latest/ext/math.html
extensions.append("sphinx.ext.mathjax")
# http://matplotlib.org/devel/documenting_mpl.html#module-matplotlib.sphinxext.plot_directive
extensions.append("matplotlib.sphinxext.plot_directive")
plot_include_source = True
plot_html_show_source_link = False
plot_formats = [("png", 160)]
plot_html_show_formats = False
|
mit
|
Jimmy-Morzaria/scikit-learn
|
sklearn/mixture/tests/test_gmm.py
|
6
|
15189
|
import unittest
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
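# The reference above uses the factorization of a diagonal-covariance Gaussian:
# log N(x | m, diag(cv)) = sum_d log N(x_d | m_d, cv_d), evaluated per component with
# scipy.stats.norm (which expects the standard deviation, hence the sqrt above).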
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
            # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
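    # Sanity check on the expected counts: with n_components=2 and n_dim=5
    # there are (n_components - 1) = 1 weight and n_components * n_dim = 10
    # mean parameters, i.e. 11 in total, plus the covariance parameters:
    # 2 ('spherical'), 2 * 5 = 10 ('diag'), 5 * 6 / 2 = 15 ('tied') and
    # 2 * 5 * 6 / 2 = 30 ('full') -- giving 13, 21, 26 and 41 respectively.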
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
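    # Since X is drawn from a standard normal, SGH approximates the
    # per-sample, per-dimension negative log-likelihood of a well-fitted
    # model, so -2 * log-likelihood is roughly 2 * n_samples * n_dim * SGH;
    # adding the 2 * p (AIC) or log(n) * p (BIC) penalty yields the reference
    # values compared below within the `bound` tolerance.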
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and
    fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
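# A minimal, illustrative sketch (not part of the original test suite) of the
# two covariance formulas contrasted in the docstring above.  The helper name
# and signature are assumptions; it simply evaluates both expressions with
# plain NumPy so their round-off behaviour can be compared on a dataset.
def _compare_covariance_formulas(X, w=None):
    # C1 = sum_i w_i x_i x_i^T - mu mu^T        (subtract-the-mean form)
    # C2 = sum_i w_i (x_i - mu) (x_i - mu)^T    (centered form)
    X = np.asarray(X, dtype=np.float64)
    if w is None:
        w = np.ones(len(X))
    w = w / w.sum()
    mu = np.dot(w, X)
    c_subtracted = np.dot(w * X.T, X) - np.outer(mu, mu)
    centered = X - mu
    c_centered = np.dot(w * centered.T, centered)
    return c_subtracted, c_centered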
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
peastman/deepchem
|
examples/low_data/muv_rf_one_fold.py
|
9
|
2031
|
"""
Train low-data MUV models with random forests. Test last fold only.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import tempfile
import numpy as np
import deepchem as dc
from datasets import load_muv_ecfp
from sklearn.ensemble import RandomForestClassifier
# 4-fold splits
K = 4
# num positive/negative ligands
n_pos = 10
n_neg = 10
# number of trials per task on the test set
n_trials = 20
muv_tasks, dataset, transformers = load_muv_ecfp()
# Define metric
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification")
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
train_folds = fold_datasets[:-1]
train_dataset = dc.splits.merge_fold_datasets(train_folds)
test_dataset = fold_datasets[-1]
# Get supports on test-set
support_generator = dc.data.SupportGenerator(
test_dataset, n_pos, n_neg, n_trials)
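# The generator yields (task, support) pairs: for each task in the test fold
# it draws n_trials support sets, each holding n_pos positive and n_neg
# negative examples (per the settings above).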
# Compute accuracies
task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}
for (task, support) in support_generator:
# Train model on support
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=100)
model = dc.models.SklearnModel(sklearn_model)
model.fit(support)
# Test model
task_dataset = dc.data.get_task_dataset_minus_support(
test_dataset, support, task)
y_pred = model.predict_proba(task_dataset)
score = metric.compute_metric(
task_dataset.y, y_pred, task_dataset.w)
print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)
# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
std_task_scores[task] = np.std(np.array(task_scores[task]))
print("Mean scores")
print(mean_task_scores)
print("Standard Deviations")
print(std_task_scores)
print("Median of Mean Scores")
print(np.median(np.array(list(mean_task_scores.values()))))
|
mit
|
samuel1208/scikit-learn
|
sklearn/cluster/__init__.py
|
364
|
1228
|
"""
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
|
bsd-3-clause
|
bbcdli/xuexi
|
fenlei_tf/script_2019Nov/src/version1/tensor_eva_Dec15.py
|
2
|
69387
|
#originally by Hamed, 25Apr.2016
#hy:Changes by Haiyan, change logs are in tensor_train.py
#####################################################################################################
import ImageDraw
import ImageFilter
import ImageOps
import time
from functools import wraps
from random import randint
import os
import sys
import datetime
import settings #hy: collection of global variables
import image_distortions
import prepare_list
import read_images
import tools
import cv2
import numpy as np
import tensorflow as tf
from sklearn import datasets
import math
import imutils
from PIL import Image #hy: create video with images
#activate global var
settings.set_global()
start_time = time.time()
#http://lvdmaaten.github.io/tsne/ visualization
## Train or Evaluation
############################################################
do_active_fields_test = 0
TEST_with_Webcam = False #hy True - test with webcam
video_label = 0 #hy: initialize/default 0: hinten, 1: links, 2: oben, 3: rechts, 4: unten, 5: vorn
TEST_with_Images = True #hy True - test with images
App_for_Images = False
TEST_with_Video = False #hy True - test with video
video_window_scale = 2
act_min = 0.80
act_max = 0.93
add_data = 0 #initial
area_step_size_webcam = 20 #479 #200
optimizer_type = 'GD' #'adam' #GD-'gradient.descent'
learning_rate = 0.0956# TODO 0.05 0.005 better, 0.001 good \0.02, 0.13
n_hidden = 60
TEST_CONV_OUTPUT = False
result_for_table = 1
SAVE_Misclassified = 0
SAVE_CorrectClassified = 0
GENERATE_FILELIST = 1
log_on = False
DEBUG = 0
#TrainingProp = 0.70
#Val_step = 10
# Network Parameters
#n_input = 42 * 42 # Cifar data input (img shape: 32*32)
n_input = settings.h_resize * settings.w_resize #hy
n_classes = len(settings.LABELS) #hy: adapt to lego composed of 6 classes. Cifar10 total classes (0-9 digits)
dropout = 0.5 # Dropout, probability to keep units
#n_hidden = 60
#batch_size = 1 # 128
#beta1 = 0.9
#beta2 = 0.999
#epsilon = 0.009
# Noise level
#noise_level = 0
#trained_model = "./" + "model_GD720_h184_w184_c6all-6881.meta"
trained_model = "/home/hamed/Documents/Lego_copy/tensor_model_sum/" + "model_II_GD60_h227_w227_c6_U0.75-14431.meta"
#trained_model = "/home/hamed/Documents/Lego_copy/tensor_model_sum/" + "model_GD360_h184_w184_c6_3conv_L0.7_O1.0_U1.0_7_0.71-6381.meta"
#Data
LABEL_LIST = './FileList.txt'
LABEL_PATH = settings.data + "/*/*/*"
#LABEL_LIST_TEST = './FileList_TEST1_sing.txt'
LABEL_LIST_TEST = './FileList_TEST.txt'
LABEL_PATH_TEST = settings.test_images + "/*/*"
#LABEL_PATH_TEST = "./Test_Images/testpkg_activation/oben/*"
#LABEL_PATH_TEST = "./Test_Images/testpkg2_no_bg/*/*" #8391 H,L,O,U 42.8 // 7831 H,L,O,U 44.3 // 8421 H,L,O,U 43.2 //
#LABEL_PATH_TEST = "./Test_Images/testpkg3_white_200x200/*/*" # L,O, R 43.2 L,O,R 43.2 O,R,V 42.8
#LABEL_PATH_TEST = "./Test_Images/testpkg5big_224x224/*/*" # H,O,U 26.7 H,O,U 26.5 H,O,U 0.27
#LABEL_PATH_TEST = "./Test_Images/testpkg6big/*/*" #
# Active fields test for visualization
if do_active_fields_test == 1:
print 'To get active fields analysis you must set read_images to sorted read'
LABEL_PATH_TEST = "./Test_Images/test_active_fields/*/*" #
LABEL_LIST_TEST = settings.test_label_file_a
activation_test_img_name = './Test_Images/hinten_ori1_rz400.jpg'
LABELS = settings.LABELS #hy
LABEL_names = settings.LABEL_names #hy
#hy:add timestamp to tensor log files
from datetime import datetime
tensorboard_path = './Tensorboard_data/sum107/'+str(datetime.now())+'/'
if GENERATE_FILELIST == 1:
print 'preparing label list'
tools.prepare_list(LABEL_LIST_TEST, LABEL_PATH_TEST) #hy: avoid wrong list error #hy trial
print 'loading data'
tools.read_images(LABEL_LIST_TEST) #hy: get usable input size for w,h
else:
if TEST_with_Images or TEST_with_Video:
tools.read_images(LABEL_LIST_TEST)
else:
tools.read_images(LABEL_LIST)
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
from datetime import datetime
str_log = optimizer_type
self.log = open(datetime.now().strftime('log_%Y_%m_%d_%H_%M' + str_log + '.log'), "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
if log_on and (TEST_with_Video):
sys.stdout = Logger()
############################################################################################################
# hy: initialize crop frame (interest of area in demo window)
# At the moment, this window has to be adjusted to focus our object.
# Different area shown in focus region leads to different test result.
############################################################################################################
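# Illustrative helper (a sketch, not called by the original script): the crop
# window described above is centred in the frame, its shorter side spans
# `area_step_size` pixels and its aspect ratio follows the resized network
# input, mirroring the arithmetic used in the webcam branch further below.
def centered_crop_window(h_frame, w_frame, area_step_size, h_resize, w_resize):
    crop_y1 = int((h_frame - area_step_size) / 2)
    crop_x1 = int((w_frame - area_step_size) / 2)
    crop_y2 = crop_y1 + area_step_size
    crop_x2 = crop_x1 + area_step_size * w_resize / h_resize
    return crop_x1, crop_y1, crop_x2, crop_y2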
def track_roi(VIDEO_FILE):
video = cv2.VideoCapture(VIDEO_FILE) # hy: changed from cv2.VideoCapture()
# cv2.waitKey(10)
video.set(1, 2) # hy: changed from 1,2000 which was for wheelchair test video,
# hy: propID=1 means 0-based index of the frame to be decoded/captured next
if not video.isOpened():
print "cannot find or open video file"
exit(-1)
# Read the first frame of the video
ret, frame = video.read()
# Set the ROI (Region of Interest). Actually, this is a
# rectangle of the building that we're tracking
c,r,w,h = 900,650,400,400
track_window = (c,r,w,h)
# Create mask and normalized histogram
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_cond = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) #hy: TERM_CRITERIA_EPS - terminate iteration condition
while True:
ret, frame = video.read()
if ret:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
ret, track_window = cv2.meanShift(dst, track_window, term_cond)
x,y,w,h = track_window
#hy: draw rectangle as tracked window area
cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
cv2.putText(frame, 'Tracked', (x-25,y-10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255,255,255), 2, cv2.CV_AA)
cv2.imshow('Tracking', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
print 'no frame received'
break
return [track_window]
def EVALUATE_IMAGES(session,num_class,img_list,_labels): #(eva)
sess = session
LABEL_LIST_TEST = img_list
LABELS = _labels
n_classes = num_class
################### active field test part one ################################
if do_active_fields_test == 1:
carimages, cartargets, f = tools.read_images(LABEL_LIST_TEST, random_read=False)
TEST_length = len(carimages)
print '1 file',LABEL_LIST_TEST, 'path', LABEL_PATH_TEST, 'len', TEST_length
#TEST_length = 1
print 'get active fields'
row = 0
col = 0
test_img_bg = cv2.imread(activation_test_img_name)
test_img_bg = cv2.resize(test_img_bg, (400, 400))
overlay = np.zeros([400, 400, 3], dtype=np.uint8)
test_img_transparent = overlay.copy()
cv2.rectangle(overlay, (0, 0), (400, 400), color=(60, 80, 30, 3))
alpha = 0.7 # hy: parameter for degree of transparency
cv2.addWeighted(overlay, alpha, test_img_bg, 1 - alpha, 0, test_img_transparent)
bg = Image.fromarray(test_img_transparent)
else:
carimages, cartargets, f = tools.read_images(LABEL_LIST_TEST, random_read=False)
TEST_length = len(carimages)
print 'file', LABEL_LIST_TEST, 'path', LABEL_PATH_TEST
if DEBUG == 1 and do_active_fields_test == 1:
overlay_show = Image.fromarray(overlay)
overlay_show.save('./1-overlay.jpg')
bg.save('./1-before.jpg')
################### active field test part one end ############################
# carimages = carimages / 255 - 0.5 #TODO here is tricky, double check wit respect to the formats
# digits.images = carimages.reshape((len(carimages), -1))
"""
print '\n'
print "4.print shape of database: ", digits.images.shape # hy
digits.images = np.expand_dims(np.array(digits.images), 2).astype(np.float32)
print "4.1.print shape of database after expansion: ", digits.images.shape # hy
digits.target = np.array(cartargets).astype(np.int32)
digits.target = dense_to_one_hot(digits.target)
print '\n'
print "5.print target"
print digits.target
"""
confMat1_TEST = np.zeros((n_classes, n_classes), dtype=np.float) #hy collect detailed confusion matrix
confMat2_TEST = np.zeros((2, 2), dtype=np.float)
confMat3 = np.zeros((1, n_classes), dtype=np.float)
count_labels = np.zeros((1, n_classes), dtype=np.float)
class_probability = np.zeros((1, n_classes), dtype=np.float)
pred_collect = []
if result_for_table == 0:
print 'True/False', 'No.', 'Name', 'TargetLabel', 'PredictLabel', 'Precision','whole_list','Top1','Top1_pres', \
'Top2', 'Top2_pres','Top3','Top3_pres','Top4','Top4_pres','Top5','Top5_pres','last','last_pres'
for i in range(0, TEST_length, 1):
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = carimages[i]
# im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
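        # The next three lines flatten the grayscale patch into a single row
        # vector, append a channel dimension and rescale pixel values from
        # [0, 255] to roughly [-0.5, 0.5], presumably matching the
        # preprocessing used at training time (see tensor_train.py).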
test_image = im.reshape((-1, im.size))
#print test_image
#print sess.run(test_image)
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check wit respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
#output = sess.run("Accuracy:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
output = sess.run("pred:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
# print("Output for external=",output)
output = tools.convert_to_confidence(output) #
np.set_printoptions(precision=3)
RES = np.argmax(output) #hy predicted label
label_target = int(cartargets[i]) #hy ground truth label
#label_pred_str, label_pred_num = tools.convert_result(RES)
#label_target_str, label_target_num = tools.convert_result(label_target)
sorted_vec, prob_all = tools.rank_index(output[0],label_target)
pred_collect.append(prob_all[0])
################### active field test part two start ################################
if do_active_fields_test == 1:
if col >= 4:
#print '\ncol is 4'
col = 0
row += 1
if row >= 4:
#print '\nrow is 4'
row = 0
positions = ((col)*100, (row)*100, (col+1)*100, (row+1)*100) #x0,y0, x1,y1
col += 1
#define image for obtaining its active fields
#activation_test_img = Image.open('./hintenTest.jpg')
#activation_test_img = Image.open('./vornTest.jpg')
#activation_test_img = Image.open('./tmp/resized/links/links_t2_1_rz400_d0_0400_1.jpg')
#activation_test_img = Image.open('./tmp/resized/links/links_t2_1_rz400_u870_400400.jpg')
#activation_test_img = Image.open('./Test_Images/hinten_ori1_rz400.jpg')
#activation_test_img = Image.open('./tmp/resized/oben/oben_t2_1_rz400_u856_400400.jpg')
#activation_test_img = Image.open('./tmp/resized/unten/unten_t2_1_rz400_d0_0400_1.jpg')
#activation_test_img = Image.open('./tmp/resized/unten/unten_t2_1_rz400_u923_400400.jpg')
#activation_test_img = Image.open('./tmp/resized/rechts/rechts_t2_1_rz400_d0_0400_1.jpg')
#activation_test_img = Image.open('./tmp/resized/rechts/rechts_t2_1_rz400_u825_400400.jpg')
#activation_test_img_copy = cv2.clone(activation_test_img)
activation_test_img = Image.open(activation_test_img_name)
thresh = float(max(pred_collect)*0.97)
print 'thresh', thresh
if prob_all[0] > thresh:
#print '\nactive field', positions
image_crop_part = activation_test_img.crop(positions)
image_crop_part = image_crop_part.filter(ImageFilter.GaussianBlur(radius=1))
bg.paste(image_crop_part, positions)
bg.save('./active_fields.jpg')
################### active field test end ################################
if result_for_table == 1:
if LABELS[label_target][:-1] == LABELS[RES][:-1]:
print '\nTestImage',i+1,f[i],LABELS[label_target][:-1]\
,LABELS[RES][:-1],prob_all[0],
for img_i in xrange(n_classes):
print settings.LABEL_names[sorted_vec[n_classes-1-img_i]], prob_all[img_i],
else:
print '\nMis-C-TestImage',i+1,f[i],LABELS[label_target][:-1],\
LABELS[RES][:-1],prob_all[0],
for img_i in xrange(n_classes):
print settings.LABEL_names[sorted_vec[n_classes-1-img_i]], prob_all[img_i],
if result_for_table == 0:
print '\nTestImage', i + 1, ':', f[i]
# print 'Image name', carimages
print 'Ground truth label:', LABELS[label_target][:-1], '; predict:', LABELS[RES][:-1] # hy
# print 'Target:', label_target, '; predict:', RES # hy
print '\nRank list of predicted results'
tools.rank_index(output[0], label_target)
label = label_target
predict = int(RES)
confMat1_TEST[label, predict] = confMat1_TEST[label, predict] + 1
count_labels[:, label] = count_labels[:, label] + 1
if predict == label_target:
label2_TEST = 0
pred2_TEST = 0
confMat3[:, int(RES)] = confMat3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i],SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i],SAVE_Misclassified)
confMat2_TEST[label2_TEST, pred2_TEST] = confMat2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat2_TEST[0, 0]
tn = confMat2_TEST[1, 1]
#print summary
print '\n\nCount correctly classified'
tools.print_label_title()
print confMat3
print 'Total labels'
print count_labels
print '\nProportion of correctly classified'
for pos in range(0, n_classes, 1):
if count_labels[:, pos] > 0:
class_probability[:, pos] = confMat3[:, pos] / count_labels[:, pos]
print class_probability
# print '\ntp, tn, total number of test images:', tp, ', ', tn, ', ', TEST_length
# print confMat2_TEST
print '\nTEST general count:'
print confMat2_TEST
print 'TEST overall acc:', "{:.3f}".format(tp / TEST_length)
###################################################################################
## Feature output #################################################################
###################################################################################
if TEST_CONV_OUTPUT:
print '\nTEST feature output:'
test_writer = tf.train.SummaryWriter(tensorboard_path + settings.LABELS[label_target], sess.graph)
wc1 = sess.run("wc1:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
wc2 = sess.run("wc2:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
wd1 = sess.run("wd1:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
w_out = sess.run("w_out:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
bc1 = sess.run("bc1:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
bc2 = sess.run("bc2:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
bd1 = sess.run("bd1:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
b_out = sess.run("b_out:0",feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
conv_feature = sess.run("conv2:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
#conv_feature_2D_batch = tools.get_feature_map(conv_feature,f,'conv2') #get defined conv value, not sure for conv2
#featureImg = sess.run("conv2img:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
summary_op = tf.merge_all_summaries()
test_res = sess.run(summary_op, feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1})
test_writer.add_summary(test_res, 1)
#print '2D size',len(conv_feature_2D_batch),'\n',sum(conv_feature_2D_batch[:])
print 'wc1 shape',wc1.shape, 'wc2:',wc2.shape, 'wd1:',wd1.shape,'w_out:',w_out.shape
print 'bc1 shape ',bc1.shape, 'bc2:',' ',bc2.shape, 'bd1: ',bd1.shape,'b_out: ',b_out.shape
print 'pred shape', len(pred_collect)
else:
print 'no image got'
return (confMat1_TEST,count_labels,confMat3,class_probability)
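# Hedged usage sketch (the actual session setup lives further down in this
# script); the names come from the globals defined above and the exact
# restore call is an assumption:
#
#   saver = tf.train.import_meta_graph(trained_model)
#   with tf.Session() as sess:
#       saver.restore(sess, trained_model[:-5])  # checkpoint prefix without ".meta"
#       EVALUATE_IMAGES(sess, n_classes, LABEL_LIST_TEST, LABELS)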
def EVALUATE_IMAGES_VAGUE(n_classes,img_list):
LABEL_LIST_TEST = img_list
# Testing
cartargets, f = tools.read_test_images(LABEL_LIST_TEST)
#print 'cartargets label', cartargets
TEST_length = 20
#TEST_length = len(cartargets)
# carimages = carimages / 255 - 0.5 #TODO here is tricky, double check wit respect to the formats
# digits.images = carimages.reshape((len(carimages), -1))
"""
print '\n'
print "4.print shape of database: ", digits.images.shape # hy
digits.images = np.expand_dims(np.array(digits.images), 2).astype(np.float32)
print "4.1.print shape of database after expansion: ", digits.images.shape # hy
digits.target = np.array(cartargets).astype(np.int32)
digits.target = dense_to_one_hot(digits.target)
print '\n'
print "5.print target"
print digits.target
"""
confMat_m1_TEST = np.zeros((n_classes, n_classes), dtype=np.float)
confMat_m2_TEST = np.zeros((2, 2), dtype=np.float)
confMat_m3 = np.zeros((1, n_classes), dtype=np.float)
count_labels_m = np.zeros((1, n_classes), dtype=np.float)
class_probability_m = np.zeros((1, n_classes), dtype=np.float)
patch_size = 42 #227
for i in range(0, TEST_length, 1):
# hy:extra Debug
#im = carimages[i]
# im = frame_crop_resize_gray # Lazy
'''
#hy: option to use numpy.ndarray, but it cannot use attribute 'crop' of Image (integer) object
img = cv2.imread(f[i])
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = imutils.resize(img, width=patch_size, height=patch_size)
h_b, w_b = img.shape
print 'h_b', h_b, ', w_b', w_b
'''
print 'processing main test image',f[i]
#hy: use integer image: Image, resize
img = Image.open(f[i]).convert('LA') #convert to gray
h_b, w_b = img.size
#print 'read test image ok', h_b, ', ', w_b
img = img.resize((patch_size * 2, patch_size * 2), Image.BICUBIC) # hy:use bicubic
#h_b, w_b = img.size
#print 'h_b', h_b, ', w_b', w_b
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
test_image = img
test_image_label = cartargets[i]
# Doing something very stupid here, fix it!
#test_image = im.reshape((-1, im.size))
# test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
#test_image = test_image / 255 - 0.5 # TODO here is tricky, double check with respect to the formats
slices_rec = image_distortions.create_test_slices(test_image,patch_size,test_image_label)
print 'slices with path received', slices_rec
slices_len = len(slices_rec)
out_sum = np.zeros((1, n_classes), dtype=np.float)
out_box = np.zeros((1, n_classes), dtype=np.float)
#batch_xs, batch_ys = im, cartargets
#output_im = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
for j in range(0, slices_len, 1):
print '\nprocessing slice', j, slices_rec[j]
#hy read and resize integer object
#im_s = Image.open(slices_rec[j]) #numpy.ndarray does not have attribute 'crop'
#im_s = im_s.resize((settings.h_resize, settings.w_resize), Image.BICUBIC) # hy:use bicubic, resize func reuqires integer object
#im_s = im_s.convert('LA') #hy convert to gray
#hy read and resize continuous number object
im_s = cv2.imread(slices_rec[j]) #result is not integer
im_s = cv2.cvtColor(im_s, cv2.COLOR_BGR2GRAY)
im_s = imutils.resize(im_s, width=settings.h_resize, height=settings.w_resize)
#hy conver to integer object required for tensor
im_s = np.asarray(im_s, np.float32)
CONF = 0.20
(sorted_vec,outputsub)= EVALUATE_IMAGE_SLICES(im_s,f,i,sess, cartargets)
print 'slice',j, 'result', sorted_vec
print 'Image slice', slices_rec[j]
outbox = outputsub
out_sum = out_sum + outputsub[0]
# print '\ntp, tn, total number of test images:', tp, ', ', tn, ', ', TEST_length
# print confMat2_TEST
print '\nTEST general count:'
print out_sum
print out_sum/slices_len
outbox[0] = out_sum/slices_len
output_im,prob_all = tools.rank_index(outbox[0],test_image_label)
print 'target', test_image_label
print 'output final prediction', output_im[-1]
RES = int(output_im[-1])
print 'test_image_label', test_image_label
label = test_image_label
predict = int(RES)
confMat_m1_TEST[label, predict] = confMat_m1_TEST[label, predict] + 1
count_labels_m[:, test_image_label] = count_labels_m[:, test_image_label] + 1
if int(RES) == int(test_image_label):
label2_TEST = 0
pred2_TEST = 0
confMat_m3[:, int(RES)] = confMat_m3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i],SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i],SAVE_Misclassified)
# print 'Count classified'
# tools.print_label_title()
# print confMat1_TEST
confMat_m2_TEST[label2_TEST, pred2_TEST] = confMat_m2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat_m2_TEST[0, 0]
tn = confMat_m2_TEST[1, 1]
print 'Count classified m1 - confusion matrix'
tools.print_label_title()
print confMat_m1_TEST
print '\nCount correctly classified -m3'
tools.print_label_title()
print confMat_m3
print 'tp,np -m2'
print confMat_m2_TEST
print 'Total labels'
print count_labels_m
print 'Proportion of correctly classified for detailed analysis' #ok
    for pos in range(0, n_classes, 1):
        if count_labels_m[:, pos] > 0:
            class_probability_m[:, pos] = confMat_m3[:, pos] / count_labels_m[:, pos]
print class_probability_m
print 'TEST overall acc:', "{:.3f}".format(tp / TEST_length)
def EVALUATE_IMAGE_SLICES(img,f,index,sess, cartargets,num_class): #hy todo change dimension to fit tensorflow
n_classes = num_class
confMat1_TEST = np.zeros((n_classes, n_classes), dtype=np.float)
confMat2_TEST = np.zeros((2, 2), dtype=np.float)
confMat3 = np.zeros((1, n_classes), dtype=np.float)
count_labels = np.zeros((1, n_classes), dtype=np.float)
class_probability = np.zeros((1, n_classes), dtype=np.float)
img_s = img
i = index
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# Doing something very stupid here, fix it!
test_image = img_s.reshape((-1, img_s.size))
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check with respect to the formats
batch_xs1, batch_ys1 = test_image, test_lables
output = sess.run("pred:0", feed_dict={"x:0": batch_xs1, "y:0": batch_ys1, "keep_prob:0": 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output) #
np.set_printoptions(precision=3)
RES = np.argmax(output)
label_target = int(cartargets[i])
print '\nTestImage', i + 1, ':', f[i]
# print 'Image name', carimages
print 'Target:', LABELS[label_target][:-1], '; predict:', LABELS[RES][:-1] # hy
# print 'Target:', label_target, '; predict:', RES # hy
count_labels[:, label_target] = count_labels[:, label_target] + 1
label = label_target
predict = int(RES)
# hy: INFO - print label, predict
# print 'labels_onehot:', labels_onehot[i, :], ' label=', label
# print 'score:', scores[i, :]
# print 'predict:', predict
#if label == predict:
confMat1_TEST[label, predict] = confMat1_TEST[label, predict] + 1
if int(RES) == label_target:
label2_TEST = 0
pred2_TEST = 0
confMat3[:, int(RES)] = confMat3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i],SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i],SAVE_Misclassified)
#print 'Count classified'
#tools.print_label_title()
#print confMat1_TEST
confMat2_TEST[label2_TEST, pred2_TEST] = confMat2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat2_TEST[0, 0]
tn = confMat2_TEST[1, 1]
print '\nCount correctly classified'
tools.print_label_title()
print confMat3
#print 'Total labels'
#print count_labels
#print 'Proportion of correctly classified'
#if count_labels[:, pos] > 0:
#for pos in range(0, 6, 1):
# class_probability[:, pos] = confMat3[:, pos] / count_labels[:, pos]
#print class_probability
#print '\nRank list of predicted results'
sorted_vec,prob_all = tools.rank_index(output[0], label_target)
#return (confMat1_TEST, confMat2_TEST, confMat3, count_labels, class_probability,sorted_vec,output)
return (sorted_vec,output)
def EVALUATE_WITH_WEBCAM(camera_port, stop,num_class):
n_classes = num_class
#hy: check camera availability
camera = cv2.VideoCapture(camera_port)
if stop == False:
#if ckpt and ckpt.model_checkpoint_path:
# Camera 0 is the integrated web cam on my netbook
# Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 1
i = 0
while True: #hy: confirm camera is available
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
print 'Getting image...'
ret, frame = camera.read()
# Captures a single image from the camera and returns it in PIL format
#ret = camera.set(3, 320) #hy use properties 3 and 4 to set frame resolution. 3- w, 4- h
#ret = camera.set(4, 240)
cv2.waitKey(1)
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide.
# cv2.imwrite(file, camera_capture)
#################################### /////////////////////////////
if frame is not None:
# print 'frame from webcam obtained'
# hy: before continue check if image is read correctly
# while frame is not None:
i += 1
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
# hy: info
print "h_video and w_video", h_frame, ",", w_frame
# cv2.imshow("ori", frame)
#crop_x1 = int((w_frame - area_step_size_webcam) / 2)
#crop_y1 = int((h_frame - area_step_size_webcam) / 2) # 1#200
#crop_x2 = crop_x1 + area_step_size_webcam
#crop_y2 = int(crop_y1 + area_step_size_webcam * settings.h_resize / settings.w_resize)
crop_y1 = int((h_frame - area_step_size_webcam) / 2) # 1#200
crop_x1 = int((w_frame - area_step_size_webcam) / 2)
crop_y2 = crop_y1 + area_step_size_webcam #hy:define shorter side as unit length to avoid decimal
crop_x2 = crop_x1 + area_step_size_webcam * settings.w_resize/settings.h_resize
#print "x1,y1,x2,y2", crop_x1, 'x', crop_y1, ',', crop_x2, 'x', crop_y2
# Crop
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
# print "shape:y1,y2,x1,x2:", crop_y1," ", crop_y2," ", crop_x1," ", crop_x2
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY),
width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 227)) #hy trial
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check wit respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run("pred:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
label_pred_str = LABELS[RES][:-1]
#label_pred_str, label_pred_num = tools.convert_result(RES)
#print 'label_pred_str', label_pred_str
print 'predicted label:', LABELS[RES][:-1]
if label_pred_str == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % i
tools.SAVE_CorrectClassified_frame(name_str,frame_crop,SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
#cv2.putText(frame, "predicted1: " + label_pred_str, org=(w_frame / 10, h_frame / 20),
# fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted1: " + label_pred_str, org=(w_frame / 10, h_frame / 20),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
prob_str = str(output[0][RES])[:4]
cv2.putText(frame, "prob:" + prob_str, org=(w_frame / 10, h_frame / 8),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
# hy: could be modified to display desired label
# cv2.putText(frame, LABELS[RES], org=(800, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255,0,0), thickness=3 )
# cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
# color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
#TODO add termination condition
print 'no frame retrieved'
del(camera)
return stop
def EVALUATE_WITH_WEBCAM_track_roi(camera_port,num_class):
n_classes = num_class
frame_index_i = 0
crop_x1 = 300
area_step_size = 200
crop_y1 = 200
# hy: check camera availability
camera = cv2.VideoCapture(camera_port)
# Read the first frame of the video
ret, frame = camera.read()
# Set the ROI (Region of Interest). Actually, this is a
# rectangle of the building that we're tracking
###############################################################################################
# Track
###############################################################################################
c, r, w, h = 100, 200, 200, 200
track_window = (c, r, w, h)
# track_window = (x0, y0, w, h)
# Create mask and normalized histogram
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_cond = (
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) # hy: TERM_CRITERIA_EPS - terminate iteration condition
# hy: initialization of confmatrix
confMat2_TEST_Video = np.zeros((2, 2), dtype=np.float)
while True: # hy: confirm camera is available
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
print 'Getting image...'
# Captures a single image from the camera and returns it in PIL format
ret, frame = camera.read()
# ret = camera.set(3, 320) #hy use properties 3 and 4 to set frame resolution. 3- w, 4- h
# ret = camera.set(4, 240)
cv2.waitKey(1)
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide.
# cv2.imwrite(file, camera_capture)
if ret:
frame_index_i = frame_index_i + 1
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
print 'hsv done'
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
print 'back project done'
ret, track_window = cv2.meanShift(dst, track_window, term_cond)
print 'ret'
xt, yt, wt, ht = track_window
# hy info
print 'xt,yt,wt,ht:', xt, ',', yt, ',', wt, ',' , ht
# hy: draw rectangle as tracked window area
cv2.rectangle(frame, (xt, yt), (xt + wt, yt + ht), 255, 2)
cv2.putText(frame, 'tracked', (xt - 25, yt - 10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.CV_AA)
cv2.waitKey(100)
###############################################################################################
# evaluate
###############################################################################################
# hy: info
# print "shape in evaluate:x1,y1:", crop_x1, ',', crop_y1
crop_x1 = xt
crop_x2 = xt + wt
crop_y1 = yt
area_step_size = ht
crop_y2 = crop_y1 + area_step_size * settings.h_resize / settings.w_resize
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
print "shape after set_testClass:y1, y2, x1, x2:", crop_y1,',', crop_y2, ',', crop_x1, ',', crop_x2
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
#######################################################################################
# Evaluate
#######################################################################################
# hy: info
# print "shape:y1,y2,x1,x2:", crop_y1," ", crop_y2," ", crop_x1," ", crop_x2
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY),
width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 200))
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check wit respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run("pred:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', frame_index_i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
if int(RES) == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % frame_index_i
tools.SAVE_CorrectClassified_frame(name_str, frame_crop, SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % frame_index_i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted1: " + LABELS[RES], org=(w_frame / 10, h_frame / 20),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
#cv2.putText(frame, "predicted2: " + LABELS[RES], org=(w_frame / 10, h_frame / 20),
# fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=5, color=(0, 255, 0), thickness=5)
output_display = str(output[0][RES])[:4]
cv2.putText(frame, "prob:" + output_display, org=(w_frame / 10, h_frame / 8),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=1)
#cv2.putText(frame, "predicted1: " + LABELS[RES] + ", prob:" + output[RES], org=(w_frame / 6, h_frame / 10),
# fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=3)
# hy: could be modified to display desired label
# cv2.putText(frame, LABELS[RES], org=(800, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255,0,0), thickness=3 )
# cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
# color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
else:
print 'no frame retrieved'
break
#hy TODO add termination condition
del(camera)
def Evaluate_VIDEO_track_roi(VIDEO_FILE,num_class):
n_classes = num_class
video = cv2.VideoCapture(VIDEO_FILE) # hy: changed from cv2.VideoCapture()
# cv2.waitKey(10)
video.set(1, 2) # hy: changed from 1,2000 which was for wheelchair test video,
# hy: propID=1 means 0-based index of the frame to be decoded/captured next
if not video.isOpened():
print "cannot find or open video file"
exit(-1)
# Read the first frame of the video
ret, frame = video.read()
# Set the ROI (Region of Interest). Actually, this is a
# rectangle of the building that we're tracking
###############################################################################################
# Track
###############################################################################################
c,r,w,h = 600,450,600,600
track_window = (c, r, w, h)
#track_window = (x0, y0, w, h)
# Create mask and normalized histogram
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_cond = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) #hy: TERM_CRITERIA_EPS - terminate iteration condition
# hy: initialization of confmatrix
confMat2_TEST_Video = np.zeros((2, 2), dtype=np.float)
video_frame_i = 0
while True:
ret, frame = video.read()
if ret:
video_frame_i = video_frame_i + 1
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
ret, track_window = cv2.meanShift(dst, track_window, term_cond)
xt, yt, wt, ht = track_window
#hy info
#print 'xt,yt,wt,ht:', xt, ',', yt, ',', wt, ',' , ht
#hy: draw rectangle as tracked window area
cv2.rectangle(frame, (xt,yt), (xt+wt,yt+ht), 255, 2)
cv2.putText(frame, 'tracked', (xt-25,yt-10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255,255,255), 2, cv2.CV_AA)
cv2.waitKey(500)
###############################################################################################
# evaluate
###############################################################################################
# hy: info
#print "shape in evaluate:x1,y1:", crop_x1, ',', crop_y1
crop_x1 = xt
crop_x2 = xt + wt
crop_y1 = yt
area_step_size = ht
crop_y2 = crop_y1 + area_step_size * settings.h_resize / settings.w_resize
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
#print "shape after set_testClass:y1, y2, x1, x2:", crop_y1,',', crop_y2, ',', crop_x1, ',', crop_x2
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
# hy: info
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 200))
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check wit respect to the formats
batch_xs, batch_ys = test_image, test_lables
#print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run("pred:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', video_frame_i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
print "label, predict =", video_label, ', ', RES # hy
if int(RES) == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_CorrectClassified_frame(name_str, frame_crop, SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
confMat2_TEST_Video[label2_TEST_Video, pred2_TEST_Video] = confMat2_TEST_Video[
label2_TEST_Video, pred2_TEST_Video] + 1
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted: " + LABELS[RES], org=(w_frame / 3, h_frame / 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0, 255, 0), thickness=4)
cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
# hy: other options - control to move ROI downwards, then to the right
# crop_y1 = crop_y1 + area_step_size/50
# if crop_y2+area_step_size >= frame.shape[0]:
# crop_y1 = 0
# crop_x1 = crop_x1 + 200
# if crop_x2+area_step_size >= frame.shape[1]:
##crop_x1 = 0
# break
else:
print 'no frame retrieved'
break # hy added
tp = confMat2_TEST_Video[0, 0]
tn = confMat2_TEST_Video[1, 1]
# print confMat2_TEST_Video
# print 'tp, tn, total number of test images:', tp, ', ', tn, ', ', tp + tn
print confMat2_TEST_Video
print 'TEST acc:', "{:.4f}".format(tp / (tp + tn))
cv2.waitKey(100)
if cv2.waitKey(1) & 0xFF == ord('q'): # hy:press key-q to quit
break
###############################################################################################
#cv2.imshow('Tracking', frame)
#if cv2.waitKey(1) & 0xFF == ord('q'):
# break
#else:
#print 'no frame received for tracking'
#break
def EVALUATE_VIDEO(VIDEO_FILE,num_class):
n_classes = num_class
video = cv2.VideoCapture(VIDEO_FILE) # hy: changed from cv2.VideoCapture()
# cv2.waitKey(10)
video.set(1, 2) # hy: changed from 1,2000 which was for wheelchair test video,
# hy: propID=1 means 0-based index of the frame to be decoded/captured next
# video.open(VIDEO_FILE)
# hy: for debug
if not video.isOpened():
print "cannot find or open video file"
exit(-1)
## Reading the video file frame by frame
# hy: initialization of confmatrix
confMat2_TEST_Video = np.zeros((2, 2), dtype=np.float)
video_frame_i = 0
while True:
video_frame_i += 1
ret, frame = video.read()
if ret:
# hy:
h_frame = frame.shape[0]
w_frame = frame.shape[1] # hy: h 1536 x w 2304
# print "h_video and w_video", h_resize, ",", w_resize
# cv2.imshow("ori", frame)
# print "frame size hxw", frame.shape[0]," ", frame.shape[1]
crop_x2 = crop_x1 + area_step_size
crop_y2 = crop_y1 + area_step_size * settings.h_resize / settings.w_resize
# Crop
# frame_crop = frame[350:750, 610:1300] #hy: ori setting for w24xh42
# hy: select suitable values for the area of cropped frame,
# adapt to the ratio of h to w after resized, e.g. 42x42 ie.w=h
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
# hy: info
print "shape:y1,y2,x1,x2:", crop_y1,", ", crop_y2,", ", crop_x1,", ", crop_x2
# print "Shape of cropped frame:", frame_crop.shape #hy: it should be same as the shape of trained images(the input image)
cv2.imshow("frame_cropped", frame_crop)
# Resize
# frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=42)
# hy: it is something different to video_crop_tool.py, here for tensorflow the width has to be that of input image
frame_crop_resize_gray = imutils.resize(cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY), width=settings.w_resize)
# hy:extra Debug
# print "Shape of cropped frame in grayscale:", frame_crop_resize_gray.shape
im = frame_crop_resize_gray # Lazy
im = np.asarray(im, np.float32)
cv2.imshow("TensorFlow Window", imutils.resize(im.astype(np.uint8), 227))
# Adding noise to the street image #TODO
# im=add_noise(im,5)
# Bluring the image to help detection #TODO
# im = cv2.GaussianBlur(im,(5,5),0)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label tp avoid errors as initial predict
# hy: info
# print "Image size (wxh):", im.size #hy
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
# print test_image
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check wit respect to the formats
batch_xs, batch_ys = test_image, test_lables
# print 'batch_xs, batch_ys:', batch_xs, ', ', batch_ys
output = sess.run("pred:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
# print("Output for external=",output)
# print output
output = tools.convert_to_confidence(output)
np.set_printoptions(precision=2)
print '\nFrame', video_frame_i
tools.print_label_title_conf()
print 'confidence =', output # hy
RES = np.argmax(output)
#hy: for sub-classes
#label_pred_str, label_pred_num = tools.convert_result(RES) # hy use it when sub-classes are applied
#RES_sub_to_face = class_label #hy added
print "label, predict =", video_label, ', ', RES # hy
if RES == video_label:
label2_TEST_Video = 0
pred2_TEST_Video = 0
name_str = settings.CorrectClassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_CorrectClassified_frame(name_str, frame_crop, SAVE_CorrectClassified)
else:
label2_TEST_Video = 1
pred2_TEST_Video = 1
name_str = settings.Misclassified + "/frame_crop%d.jpg" % video_frame_i
tools.SAVE_Misclassified_frame(name_str, frame_crop, SAVE_Misclassified)
confMat2_TEST_Video[label2_TEST_Video, pred2_TEST_Video] = confMat2_TEST_Video[
label2_TEST_Video, pred2_TEST_Video] + 1
# Make a little demonstration (hy:static window version)
# hy: showing evaluation result identified class on video
# if RES == 0 or RES == 2:
# cv2.rectangle(frame,(610, 350), (1300, 750), color=(0, 255, 0), thickness=20)
# cv2.putText(frame, 'Available', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0,255,0),thickness=4)
# else:
# cv2.rectangle(frame,(610, 350), (1300, 750), color=(0, 0, 255), thickness=20)
# cv2.putText(frame, 'Occupied', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0,0,255), thickness=4)
# hy: TODO adapt to current app
# if RES == 0 or RES == 2:
# cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=20)
# cv2.putText(frame, 'Available', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2,
# color=(0, 255, 0), thickness=4)
# else:
# cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 0, 255), thickness=20)
# cv2.putText(frame, 'Occupied', org=(800, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2,
# color=(0, 0, 255), thickness=4)
label_pred_str = LABELS[RES][:-1]
cv2.rectangle(frame, (crop_x1, crop_y1), (crop_x2, crop_y2), color=(0, 255, 0), thickness=1)
cv2.putText(frame, "predicted: " + label_pred_str, org=(w_frame / 3, h_frame / 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(0, 255, 0), thickness=4)
# hy: could be modified to display desired label
# cv2.putText(frame, label_pred_str, org=(800, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255,0,0), thickness=3 )
cv2.putText(frame, str(video.get(1)), org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
color=(0, 255, 0), thickness=1)
#cv2.putText(frame, label_pred_str, org=(20, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
# color=(0, 255, 0), thickness=1)
frame_demo = imutils.resize(frame, width=1200)
# frame_demo = imutils.resize(frame, width = min(1200, settings.w_resize * 30)) #hy: choose a smaller window size
cv2.imshow("Demo", frame_demo)
cv2.waitKey(300)
# hy: other options - control to move ROI downwards, then to the right
# crop_y1 = crop_y1 + area_step_size/50
# if crop_y2+area_step_size >= frame.shape[0]:
# crop_y1 = 0
# crop_x1 = crop_x1 + 200
# if crop_x2+area_step_size >= frame.shape[1]:
##crop_x1 = 0
# break
else:
print 'no frame retrieved'
break # hy added
tp = confMat2_TEST_Video[0, 0]
tn = confMat2_TEST_Video[1, 1]
# print confMat2_TEST_Video
# print 'tp, tn, total number of test images:', tp, ', ', tn, ', ', tp + tn
print confMat2_TEST_Video
print 'TEST acc:', "{:.4f}".format(tp / (tp + tn))
if cv2.waitKey(1) & 0xFF == ord('q'): # hy:press key-q to quit
break
#def dense_to_one_hot(labels_dense, num_classes=n_classes):
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
if DEBUG == 1:
print 'one_hot_vector:', labels_one_hot[0]
return labels_one_hot
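# Editorial sketch, not from the original script: for dense labels [0, 2, 1] and
# num_classes=3, dense_to_one_hot(np.array([0, 2, 1]), 3) returns
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 1, 0]]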
#####################################################################################################
################## TEST with Video ###########################
#####################################################################################################
if TEST_with_Video:
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="./tensor_model_sum/")
saver = tf.train.import_meta_graph(trained_model) # (eva)
if ckpt and ckpt.model_checkpoint_path:
saver = tf.train.Saver()
saver.restore(sess, ckpt.model_checkpoint_path)
print "Evaluation with video, model", ckpt.model_checkpoint_path
else:
print 'not found model'
print 'Test with video starting ...'
#for video_index in xrange(1):
video_list = ['hinten/', 'links/', 'oben/', 'rechts/', 'unten/', 'vorn/']
#for video_index in xrange(1):
for video_index in xrange(len(video_list)):
#TestFace = settings.LABELS[0][:-1] # only one
TestFace = video_list[video_index][:-1] # all # full, 0 hinten, 1 links, 2 oben, 3 rechts, 4 unten, 5 vorn,
print 'Test face:', TestFace
#TestFace = settings.LABELS[video_index][:-1] #'vorn' #'hinten' # full, 0 hinten, 1 links, 2 oben, 3 rechts, 4 unten, 5 vorn,
VIDEO_FILE, crop_x1, crop_y1, area_step_size, video_label = tools.set_TestClass(TestFace, video_window_scale)
# hy: info
#print "shape after set_testClass:x1,y1:", crop_x1, ", ", crop_y1
#track_frame = track_roi(VIDEO_FILE)
#Evaluate_VIDEO_track_roi(VIDEO_FILE)
EVALUATE_VIDEO(VIDEO_FILE, n_classes)
print 'test face:', TestFace, 'done\n'
#TestFace = 'vorn'
#VIDEO_FILE, crop_x1, crop_y1, area_step_size, video_label = tools.set_TestClass(TestFace, video_window_scale)
#EVALUATE_VIDEO(VIDEO_FILE)
#print 'test face:', TestFace, 'done\n'
#hy: another option - automatically move ROI downwards, then to the right
#crop_y1 = crop_y1 + area_step_size/50
#if crop_y2+area_step_size >= frame.shape[0]:
#crop_y1 = 0
#crop_x1 = crop_x1 + 200
#if crop_x2+area_step_size >= frame.shape[1]:
##crop_x1 = 0
#break
#####################################################################################################
##hy: ################ TEST with IMAGES #######################
#####################################################################################################
init = tf.initialize_all_variables() #hy
if TEST_with_Images:
#hy: use a previous model
#hy: load model at checkpoint
#model 1
#'''
with tf.Session() as sess:
#hy: load saved model with values
#ckpt = tf.train.get_checkpoint_state(checkpoint_dir="") # "./backupModel/"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="./tensor_model_sum/") # "./backupModel/"
saver = tf.train.import_meta_graph(trained_model) #(eva)
#saver = tf.train.import_meta_graph("./tensor_model_sum/model_GD3conv360_h184_w184_c63conv_O0.75_U1.0_1_0.79-431.meta") #(eva)
#model_GD3conv360_h184_w184_c63conv_H0.7_0_0.57_U1.0_0_0.57-901 0.33
#last 0.26
#model_GD3conv360_h184_w184_c63conv_U1.0_2_0.65-1291 0.263
#model_GD3conv360_h184_w184_c63conv_H0.75_2_0.58_U1.0_2_0.58-1131 0.36
#model_GD3conv200_h184_w184_c6_3conv_O0.73_0_0.42-2961 0.592 (new test files) -- top
#model_GD2000_h184_w184_c6_II_O0.64_0_0.42-1061 0.51(new test files)
#
if ckpt and ckpt.model_checkpoint_path:
saver = tf.train.Saver()
saver.restore(sess,ckpt.model_checkpoint_path)
else:
print 'not found model'
print 'I-Test with Images starting ...' #print 'Test with images starting ...', ckpt.model_checkpoint_path
#sess.run(init)
#hy: evaluate
eval_file_list = LABEL_LIST_TEST
#LABELS = ['links/', 'rechts/']
#confMat1_TEST_i,count_labels,confMat3,class_probability = EVALUATE_IMAGES(sess,6, eval_file_list, LABELS) #
try:
confMat1_TEST_i,count_labels,confMat3,class_probability = EVALUATE_IMAGES(sess,6, eval_file_list, LABELS) #
'''
print '\nCount correctly classified'
tools.print_label_title()
print confMat3
print 'Total labels'
print count_labels
print '\nProportion of correctly classified'
for pos in range(0, n_classes, 1):
if count_labels[:, pos] > 0:
class_probability[:, pos] = confMat3[:, pos] / count_labels[:, pos]
print class_probability
'''
print 'Count classified in each class for detailed analysis'
tools.print_label_title()
print confMat1_TEST_i
except:
print '\n[Hint] If error, check settings - tensor input size and n_class, n_hidden all should be the same as given in log file name'
#model 2
'''
new_graph = tf.Graph()
with tf.Session(graph=new_graph) as sess2:
# method 2 must initial sess
# hy: load saved model with values
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="./tensor_model_pre/") # ./backupModel/
saver2 = tf.train.import_meta_graph("./tensor_model_pre/model_GD60_h42_w42_c6_II1821.meta")
if ckpt and ckpt.model_checkpoint_path:
print '\n################################\nsecond model'
saver2 = tf.train.Saver()
saver2.restore(sess2, ckpt.model_checkpoint_path)
else:
print 'not found model'
print 'II-Test with Images starting ...' # print 'Test with images starting ...', ckpt.model_checkpoint_path
#sess.run(init)
# hy: evaluate
eval_file_list2 = LABEL_LIST_TEST
LABELS = ['hinten/', 'links/', 'oben/', 'rechts/', 'unten/', 'vorn/']
confMat1_TEST_i = EVALUATE_IMAGES(sess2,6,eval_file_list2, LABELS)
print 'Count classified in each class for detailed analysis'
tools.print_label_title()
print confMat1_TEST_i
'''
#####################################################################################################
##hy: ################ App for IMAGES #######################
#####################################################################################################
init = tf.initialize_all_variables() # hy
def get_precision(session,im):
sess = session
im = np.asarray(im, np.float32)
CONF = 0.20
test_image = im
test_lables = np.zeros((1, n_classes)) # Making a dummy label to avoid errors as initial predict
# Doing something very stupid here, fix it!
test_image = im.reshape((-1, im.size))
# print test_image
# print sess.run(test_image)
test_image = np.expand_dims(np.array(test_image), 2).astype(np.float32)
test_image = test_image / 255 - 0.5 # TODO here is tricky, double check with respect to the formats
# hy: evaluate
batch_xs, batch_ys = test_image, test_lables
# output = sess.run("Accuracy:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
output = sess.run("pred:0", feed_dict={"x:0": batch_xs, "y:0": batch_ys, "keep_prob:0": 1.})
# print("Output for external=",output)
output = tools.convert_to_confidence(output) #
return output
if App_for_Images:
# model 1
# '''
#total_images, digits, carimages, cartargets, f, val2_digits, val2_images, val2_targets, val2_f = import_data()
carimages, cartargets, f = tools.read_images(LABEL_LIST_TEST)
print '1 file', LABEL_LIST_TEST
TEST_length = len(carimages)
# carimages = carimages / 255 - 0.5 #TODO here is tricky, double check with respect to the formats
confMat1_TEST = np.zeros((n_classes, n_classes), dtype=np.float) # hy collect detailed confusion matrix
confMat2_TEST = np.zeros((2, 2), dtype=np.float)
confMat3 = np.zeros((1, n_classes), dtype=np.float)
count_labels = np.zeros((1, n_classes), dtype=np.float)
class_probability = np.zeros((1, n_classes), dtype=np.float)
for i in range(0, TEST_length, 1):
new_graph = tf.Graph()
with tf.Session(graph=new_graph) as sess:
# hy: load saved model with values
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="./backupModel/") # ./backupModel/
saver = tf.train.import_meta_graph("model_graph_GD_c6_491_oben_unten.meta")
if ckpt and ckpt.model_checkpoint_path:
saver = tf.train.Saver()
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print 'not found model'
im = carimages[i]
# im = frame_crop_resize_gray # Lazy
output1 = get_precision(sess,im)
tools.print_label_title()
np.set_printoptions(precision=3)
print 'output1', output1
# model 2
new_graph = tf.Graph()
with tf.Session(graph=new_graph) as sess2:
# method 2 must initial sess after adding operation and before run
# hy: load saved model with values
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="") # ./backupModel/
saver2 = tf.train.import_meta_graph("model_graph_GD_c6_601_links_25_oben_91_unten_83_vorn_70.meta")
if ckpt and ckpt.model_checkpoint_path:
print '################################\nsecond model'
saver2 = tf.train.Saver()
saver2.restore(sess2, ckpt.model_checkpoint_path)
else:
print 'not found model'
print 'II-Test with Images starting ...' # print 'Test with images starting ...', ckpt.model_checkpoint_path
#sess2.run(init)
# hy: evaluate
output2 = get_precision(sess2,im)
tools.print_label_title()
print 'output2', output2
output = output1+output2
print 'output', output
RES = np.argmax(output) # hy predicted label
label_target = int(cartargets[i]) # hy ground truth label
#print 'label_target',i,':', label_target
# label_pred_str, label_pred_num = tools.convert_result(RES)
# label_target_str, label_target_num = tools.convert_result(label_target)
print '\nTestImage', i + 1, ':', f[i]
# print 'Image name', carimages
print 'Ground truth label:', LABELS[label_target][:-1], '; predict:', LABELS[RES][:-1] # hy
# print 'Target:', label_target, '; predict:', RES # hy
label = label_target
predict = int(RES)
confMat1_TEST[label, predict] = confMat1_TEST[label, predict] + 1
count_labels[:, label] = count_labels[:, label] + 1
if predict == label_target:
label2_TEST = 0
pred2_TEST = 0
confMat3[:, int(RES)] = confMat3[:, int(RES)] + 1
tools.SAVE_CorrectClassified_Img(f[i], SAVE_CorrectClassified)
else:
label2_TEST = 1
pred2_TEST = 1
tools.SAVE_Misclassified_Img(f[i], SAVE_Misclassified)
confMat2_TEST[label2_TEST, pred2_TEST] = confMat2_TEST[label2_TEST, pred2_TEST] + 1
tp = confMat2_TEST[0, 0]
tn = confMat2_TEST[1, 1]
print '\nRank list of predicted results'
tools.rank_index(output[0], label_target)
print '\nCount correctly classified'
tools.print_label_title()
print confMat3
print 'Total labels'
print count_labels
print 'Proportion of correctly classified'
for pos in range(0, n_classes, 1):
if count_labels[:, pos] > 0:
class_probability[:, pos] = confMat3[:, pos] / count_labels[:, pos]
print class_probability
# print '\ntp, tn, total number of test images:', tp, ', ', tn, ', ', TEST_length
# print confMat2_TEST
print '\nTEST general count:'
print confMat2_TEST
print 'TEST overall acc:', "{:.3f}".format(tp / TEST_length)
#matrix count
tools.print_label_title()
print confMat1_TEST
print '\n'
######################################################################################
######################################################################################
#https://github.com/tensorflow/tensorflow/issues/3270 load two models
# hy option2
#EVALUATE_IMAGES_VAGUE()
#####################################################################################################
##hy: ################ Test with Webcam #######################
#####################################################################################################
if TEST_with_Webcam:
with tf.Session() as sess:
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="")
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Evaluation live frames with", ckpt.model_checkpoint_path
else:
print 'not found model'
print 'Test with Webcam starting ...'
# Camera 0 is the integrated web cam on my netbook
camera_port = 0
#EVALUATE_WITH_WEBCAM_track_roi(camera_port,n_classes)
EVALUATE_WITH_WEBCAM(camera_port, False,n_classes)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
## TEST with WEBCAM END
cv2.waitKey(0)
cv2.destroyAllWindows()
# hy:total time
#####################################################################################################
##hy: ################ Test End #######################
#####################################################################################################
elapsed_time = time.time() - start_time
print 'Total elapsed time:', "{:.2f}".format(elapsed_time / 60), 'min'
|
apache-2.0
|
daleloogn/BUG-smacpy-GMM
|
smacpy.py
|
2
|
10016
|
#!/bin/env python
#
# smacpy - simple-minded audio classifier in python
#
# Copyright (c) 2012 Dan Stowell and Queen Mary University of London
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os.path
import numpy as np
import argparse
from glob import glob
from scikits.audiolab import Sndfile
from scikits.audiolab import Format
from sklearn.mixture import GMM
from MFCC import melScaling
#######################################################################
# some settings
framelen = 1024
fs = 44100.0
verbose = True
#######################################################################
# main class
class Smacpy:
"""Smacpy - simple-minded audio classifier in python. See the README file for more details.
USAGE EXAMPLE:
In this hypothetical example we train on four audio files, labelled as either 'usa' or 'uk', and then test on a separate audio file of someone called hubert:
from smacpy import Smacpy
model = Smacpy("wavs/training", {'karen01.wav':'usa', 'john01.wav':'uk', 'steve02.wav':'usa', 'joe03.wav':'uk'})
model.classify('wavs/testing/hubert01.wav')
Note for developers: this code should aim to be understandable, and not too long. Don't add too much functionality, or efficiency ;)
"""
def __init__(self, wavfolder, trainingdata):
"""Initialise the classifier and train it on some WAV files.
'wavfolder' is the base folder, to be prepended to all WAV paths.
'trainingdata' is a dictionary of wavpath:label pairs."""
self.mfccMaker = melScaling(int(fs), framelen/2, 40)
self.mfccMaker.update()
allfeatures = {wavpath:self.file_to_features(os.path.join(wavfolder, wavpath)) for wavpath in trainingdata}
# Determine the normalisation stats, and remember them
allconcat = np.vstack(list(allfeatures.values()))
self.means = np.mean(allconcat, 0)
self.invstds = np.std(allconcat, 0)
for i,val in enumerate(self.invstds):
if val == 0.0:
self.invstds[i] = 1.0
else:
self.invstds[i] = 1.0 / val
# For each label, compile a normalised concatenated list of features
aggfeatures = {}
for wavpath, features in allfeatures.items():
label = trainingdata[wavpath]
normed = self.__normalise(features)
if label not in aggfeatures:
aggfeatures[label] = normed
else:
aggfeatures[label] = np.vstack((aggfeatures[label], normed))
# For each label's aggregated features, train a GMM and remember it
self.gmms = {}
for label, aggf in aggfeatures.items():
if verbose: print(" Training a GMM for label %s, using data of shape %s" % (label, str(np.shape(aggf))))
self.gmms[label] = GMM(n_components=10) # , cvtype='full')
self.gmms[label].fit(aggf)
if verbose: print(" Trained %i classes from %i input files" % (len(self.gmms), len(trainingdata)))
def __normalise(self, data):
"Normalises data using the mean and stdev of the training data - so that everything is on a common scale."
return (data - self.means) * self.invstds
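# Worked example with hypothetical numbers (not from the original): if self.means were [1.0]
# and self.invstds were [0.5], a feature frame [3.0] would map to (3.0 - 1.0) * 0.5 = 1.0.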
def classify(self, wavpath):
"Specify the path to an audio file, and this returns the max-likelihood class, as a string label."
features = self.__normalise(self.file_to_features(wavpath))
# For each label GMM, find the overall log-likelihood and choose the strongest
bestlabel = ''
bestll = -9e99
for label, gmm in self.gmms.items():
ll = gmm.eval(features)[0]
ll = np.sum(ll)
if ll > bestll:
bestll = ll
bestlabel = label
return bestlabel
def file_to_features(self, wavpath):
"Reads through a mono WAV file, converting each frame to the required features. Returns a 2D array."
if verbose: print("Reading %s" % wavpath)
if not os.path.isfile(wavpath): raise ValueError("path %s not found" % wavpath)
sf = Sndfile(wavpath, "r")
#if (sf.channels != 1) and verbose: print(" Sound file has multiple channels (%i) - channels will be mixed to mono." % sf.channels)
if sf.samplerate != fs: raise ValueError("wanted sample rate %g - got %g." % (fs, sf.samplerate))
window = np.hamming(framelen)
features = []
while(True):
try:
chunk = sf.read_frames(framelen, dtype=np.float32)
if len(chunk) != framelen:
print("Not read sufficient samples - returning")
break
if sf.channels != 1:
chunk = np.mean(chunk, 1) # mixdown
framespectrum = np.fft.fft(window * chunk)
magspec = abs(framespectrum[:framelen/2])
# do the frequency warping and MFCC computation
melSpectrum = self.mfccMaker.warpSpectrum(magspec)
melCepstrum = self.mfccMaker.getMFCCs(melSpectrum,cn=True)
melCepstrum = melCepstrum[1:] # exclude zeroth coefficient
melCepstrum = melCepstrum[:13] # limit to lower MFCCs
framefeatures = melCepstrum # todo: include deltas? that can be your homework.
features.append(framefeatures)
except RuntimeError:
break
sf.close()
return np.array(features)
#######################################################################
def trainAndTest(trainpath, trainwavs, testpath, testwavs):
"Handy function for evaluating your code: trains a model, tests it on wavs of known class. Returns (numcorrect, numtotal, numclasses)."
print("TRAINING")
model = Smacpy(trainpath, trainwavs)
print("TESTING")
ncorrect = 0
for wavpath,label in testwavs.items():
result = model.classify(os.path.join(testpath, wavpath))
if verbose: print(" inferred: %s" % result)
if result == label:
ncorrect += 1
return (ncorrect, len(testwavs), len(model.gmms))
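# Usage sketch with hypothetical paths and labels (assumes these WAV files exist):
# ncorrect, ntotal, nclasses = trainAndTest('wavs/train', {'usa_01.wav': 'usa', 'uk_01.wav': 'uk'},
#                                           'wavs/test', {'usa_02.wav': 'usa', 'uk_02.wav': 'uk'})
# print("Accuracy: %.2f" % (float(ncorrect) / ntotal))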
#######################################################################
# If this file is invoked as a script, it carries out a simple runthrough
# of training on some wavs, then testing, with classnames being the start of the filenames
if __name__ == '__main__':
# Handle the command-line arguments for where the train/test data comes from:
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--trainpath', default='wavs', help="Path to the WAV files used for training")
parser.add_argument('-T', '--testpath', help="Path to the WAV files used for testing")
parser.add_argument('-q', dest='quiet', action='store_true', help="Be less verbose, don't output much text during processing")
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', '--charsplit', default='_', help="Character used to split filenames: anything BEFORE this character is the class")
group.add_argument('-n', '--numchars' , default=0 , help="Instead of splitting using 'charsplit', use this fixed number of characters from the start of the filename", type=int)
args = vars(parser.parse_args())
verbose = not args['quiet']
if args['testpath']==None:
args['testpath'] = args['trainpath']
# Build up lists of the training and testing WAV files:
wavsfound = {'trainpath':{}, 'testpath':{}}
for onepath in ['trainpath', 'testpath']:
pattern = os.path.join(args[onepath], '*.wav')
for wavpath in glob(pattern):
if args['numchars'] != 0:
label = os.path.basename(wavpath)[:args['numchars']]
else:
label = os.path.basename(wavpath).split(args['charsplit'])[0]
shortwavpath = os.path.relpath(wavpath, args[onepath])
wavsfound[onepath][shortwavpath] = label
if len(wavsfound[onepath])==0:
raise RuntimeError("Found no files using this pattern: %s" % pattern)
if verbose:
print("Class-labels and filenames to be used from %s:" % onepath)
for wavpath,label in sorted(wavsfound[onepath].items()):
print(" %s: \t %s" % (label, wavpath))
if args['testpath'] != args['trainpath']:
# Separate train-and-test collections
ncorrect, ntotal, nclasses = trainAndTest(args['trainpath'], wavsfound['trainpath'], args['testpath'], wavsfound['testpath'])
print("Got %i correct out of %i (trained on %i classes)" % (ncorrect, ntotal, nclasses))
else:
# This runs "stratified leave-one-out crossvalidation": test multiple times by leaving one-of-each-class out and training on the rest.
# First we need to build a list of files grouped by each classlabel
labelsinuse = sorted(list(set(wavsfound['trainpath'].values())))
grouped = {label:[] for label in labelsinuse}
for wavpath,label in wavsfound['trainpath'].items():
grouped[label].append(wavpath)
numfolds = min(len(collection) for collection in grouped.values())
# Each "fold" will be a collection of one item of each label
folds = [{wavpaths[index]:label for label,wavpaths in grouped.items()} for index in range(numfolds)]
totcorrect, tottotal = (0,0)
# Then we go through, each time training on all-but-one and testing on the one left out
for index in range(numfolds):
print("Fold %i of %i" % (index+1, numfolds))
chosenfold = folds[index]
alltherest = {}
for whichfold, otherfold in enumerate(folds):
if whichfold != index:
alltherest.update(otherfold)
ncorrect, ntotal, nclasses = trainAndTest(args['trainpath'], alltherest, args['trainpath'], chosenfold)
totcorrect += ncorrect
tottotal += ntotal
print("Got %i correct out of %i (using stratified leave-one-out crossvalidation, %i folds)" % (totcorrect, tottotal, numfolds))
|
apache-2.0
|
tharunkalwa/pympler
|
pympler/charts.py
|
7
|
1974
|
"""
Generate charts from gathered data.
Requires **matplotlib**.
"""
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def tracker_timespace(filename, stats):
"""
Create a time-space chart from a ``Stats`` instance.
"""
classlist = list(stats.index.keys())
classlist.sort()
for snapshot in stats.snapshots:
stats.annotate_snapshot(snapshot)
timestamps = [fp.timestamp for fp in stats.snapshots]
offsets = [0] * len(stats.snapshots)
poly_labels = []
polys = []
for clsname in classlist:
pct = [fp.classes[clsname]['pct'] for fp in stats.snapshots]
if max(pct) > 3.0:
sizes = [fp.classes[clsname]['sum'] for fp in stats.snapshots]
sizes = [float(x)/(1024*1024) for x in sizes]
sizes = [offset+size for offset, size in zip(offsets, sizes)]
poly = matplotlib.mlab.poly_between(timestamps, offsets, sizes)
polys.append( (poly, {'label': clsname}) )
poly_labels.append(clsname)
offsets = sizes
fig = plt.figure(figsize=(10, 4))
axis = fig.add_subplot(111)
axis.set_title("Snapshot Memory")
axis.set_xlabel("Execution Time [s]")
axis.set_ylabel("Virtual Memory [MiB]")
totals = [x.asizeof_total for x in stats.snapshots]
totals = [float(x)/(1024*1024) for x in totals]
axis.plot(timestamps, totals, 'r--', label='Total')
tracked = [x.tracked_total for x in stats.snapshots]
tracked = [float(x)/(1024*1024) for x in tracked]
axis.plot(timestamps, tracked, 'b--', label='Tracked total')
for (args, kwds) in polys:
axis.fill(*args, **kwds)
axis.legend(loc=2) # TODO fill legend
fig.savefig(filename)
except ImportError:
def tracker_timespace(*_args):
pass
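# Usage sketch (assumes a pympler ClassTracker whose snapshots have already been taken;
# the filename is illustrative):
# from pympler.classtracker import ClassTracker
# tracker = ClassTracker()
# ... track classes and call tracker.create_snapshot() a few times ...
# tracker_timespace('timespace.png', tracker.stats)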
|
apache-2.0
|
jhmatthews/cobra
|
source/pfgrid.py
|
1
|
7287
|
#! /Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python
'''
pfgrid.py
Synopsis:
Generates a grid of parameter files for the python radiative transfer code.
Note that this will only generate models with the same general type due to keyword order.
'''
help_string='''
pfgrid.py
Synopsis:
Generates a grid of parameter files for the python radiative transfer code.
Note that this will only generate models with the same general type due to keyword order.
Usage:
pfgrid.py [-h]
'''
import sys, os
import numpy as np
import disk as ast
import pylab
from disky_const import *
if len(sys.argv)>1:
if sys.argv[1] == "-h" or sys.argv[1] == "h" or sys.argv[1] == "help":
print help_string
# read in template file
template = np.loadtxt('template.pf', dtype='string', unpack=True)
'''# read in grid of values to change
grid = np.loadtxt('grid_values', dtype='string', unpack=True, comments ="#")
keywords_to_change = grid[0]
values_to_change = grid[1:]
nx = len(values_to_change[0])
ny = len(keywords_to_change)
# calculate total number of runs
n_runs = nx ** ny
print "Welcome to pfgrid. Creating grids for %i runs..." % n_runs
print "You have %i variables with %i degrees of freedom" % (ny, nx)
print keywords_to_change
sys.exit()
all_values = []
for i in range(n_runs):
all_values.append(values)'''
# you now have n_runs copies of the initial parameter file
#all_values = np.array(values)
# standard alpha of power law
alpha_pl = -0.9
n = 0
MBH = np.array([1.0e7, 1.0e8,1.0e9]) # black holes masses
EDD = np.array([0.1, 0.2, 0.5]) # eddington fractions
L_X = np.array([1e43, 1e44, 1e45]) # X-ray luminosities
MDOT_FRAC = np.array([0.1, 1.0, 10.0]) # mdot wind as fraction of mdot acc
# lengths of arrays
nmasses = len(MBH)
nagn = len(L_X)
nwind = len(MDOT_FRAC)
ndisk = len(EDD)
print "Fiducial model:"
print "-----------------------"
m = 1.0e9; mdot = 5.0; lum_agn = 1.0e43
print "L_bol: %8.4e" % ast.L_bol ( mdot, m)
#sys.exit()
L_2kev = ast.L_two ( lum_agn , -0.9)
L_2500 = ast.L_2500 ( mdot, m )
alpha_ox = ast.alp_ox ( L_2kev, L_2500 )
print "L_2kev: %8.4e L_2500: %8.4e alpha_ox: %f" % (L_2kev, L_2500, alpha_ox)
freq_disk, specdisk = ast.spec_disk(1e14,1e18,m,mdot,8.85667e+14,1e17)
def strip(character, string):
new_string = ""
for s in string:
if s != character:
new_string += s
return new_string
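# For instance (illustrative values), strip(".", "run0_edd0.1_w05") returns "run0_edd01_w05",
# which keeps the generated .pf root names free of extra dots.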
'''
#print freq_disk, specdisk
pylab.plot ( freq_disk, specdisk )
pylab.show()
print np.sum( specdisk)
sys.exit()
'''
alphas = [[], [], []]
lums = [[], [], []]
lx = [[], [], []]
roots=[]
for i in range( nmasses):
m = MBH[i]
for j in range( ndisk):
edd_frac = EDD [j]
mdot = ast.mdot_from_edd ( edd_frac, m , eta = 0.1)
for k in range (nagn):
Lbol = edd_frac * ast.Ledd(m)
frac_bh = Lbol / 2.5e46 # scale our L_x with Lbol
lum_agn = L_X [k] * ( frac_bh)
for l in range(nwind):
mdotwind = MDOT_FRAC[l] * mdot
root = "run%i_edd%.1f_w%.1f_l%i_bh%i" % ( n, edd_frac, mdotwind, np.log10(lum_agn), np.log10(m) )
root = strip(".", root)
roots.append(root)
filename = "%s.pf" % root
inp = open(filename, "w")
write_array = template
L_2kev = ast.L_two ( lum_agn , -0.9)
L_2500 = ast.L_2500 ( mdot , m ) * np.cos (PI * 40.0 /180.0 )
alpha_ox = ast.alp_ox ( L_2kev, L_2500 )
print "\nModel %i %s" % (n, root)
print "-----------------------"
print "L_2kev: %8.4e L_2500: %8.4e alpha_ox: %f" % (L_2kev, L_2500, alpha_ox)
print "mdot %8.4e mdotwind %8.4e L_X %8.4e" % (mdot, mdotwind, lum_agn)
print "L_bol %8.4e" % Lbol
#print "L_X: %8.4e L_bol: %8.4e" % (lum_agn, lum_bol)'''
'''freq_disk, specdisk = ast.spec_disk(1e14,1e18,m,mdot,8.85667e+14,1e17)
print np.sum( specdisk)'''
index = np.where(write_array[0] == "mstar(msol)")
write_array [1][ index ] = str ( m )
index = np.where(write_array[0] =="lum_agn(ergs/s) ")
write_array [1][ index ] = str (lum_agn )
index = np.where ( write_array[0] =="disk.mdot(msol/yr)")
write_array [1][ index ] = str (mdot)
index = np.where(write_array[0] =="wind.mdot(msol/yr)")
write_array [1][ index ] = str (mdotwind)
#filename = "run_%i" % n
write_array = np.transpose ( write_array)
np.savetxt(filename, write_array, fmt="%s\t%s")
n += 1
alphas[i].append(alpha_ox)
lums[i].append(L_2500 ) # this is the 40 degree L_2500
lx[i].append(lum_agn)
lums = np.array(lums)
#lums = lums * np.cos (PI * 40.0 /180.0 )
colors = ['r', 'g', 'b' ]
labels = ['1e7', '1e8', '1e9']
fit = [ [10.0**27.5, 10.0**32.5], [-1.1, -1.8] ]
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(211)
for i in range(nmasses):
ax.scatter(lums[i], alphas[i], label=labels[i], c=colors[i])
ax.plot(fit[0], fit[1])
ax.set_xscale('log')
ax.set_ylabel("alpha_ox estimate")
ax.set_xlabel("L_2500 estimate")
ax = fig.add_subplot(212)
for i in range(nmasses):
ax.scatter(lx[i], alphas[i], label=labels[i], c=colors[i])
ax.set_xscale('log')
ax.set_ylabel("alpha_ox estimate")
ax.set_xlabel("L_x")
plt.legend()
plt.savefig("shankar.png")
print "writing to script..."
nscripts = len(roots) / 6.0
# check if there is a remainder
if float(int(nscripts)) != nscripts:
remainder = len(roots) - ( nscripts * 6)
nscripts = int(nscripts) + 1
print nscripts, remainder, len(roots), float(int(nscripts))
for i in range(nscripts):
inp = open("script%i" % i, "w")
inp.write("#!/bin/bash\n\n")
inp.write("cd /home/jm8g08/grid/grid_shankar\n\n")
first = i*6
last = (i+1) * 6
if last > len(roots): last = len(roots)
for root in roots[first:last]:
print root
inp.write("/home/jm8g08/Python/bin/py76c_dev %s > %s.out &\n" % (root,root))
inp.write("wait\n")
inp.close()
print "writing to Daddy script..."
inp = open("daddy_script", "w")
inp.write("#!/bin/bash\n\n")
for i in range(nscripts):
inp.write("qsub -l nodes=1:ppn=6 -l walltime=30:00:00 script%i\n" % i)
inp.close()
print "All done!"
'''
for i in range(ny):
constant_keyword = keywords_to_change[i]
for j in range(nx):
keywords_temp = keywords
values_temp = values
index_to_array = np.where(keywords == constant_keyword)
values_temp[index_to_array] = values_to_change
all_values_array.append ( values )
# we read a number of arguments from the command line
# the user can decide to change certain keywords
for i in range(len(keywords_to_change)):
keyword = keywords_to_change[i]
for j in range(len(values_to_change[i])):
# search keyword array for keyword desired
index_to_array = np.where(keywords == keyword)
# create an array of values that have a keyword match - should be length 1!!
value_matches = values[ index_to_array ]
if len(value_matches)>1: # if we havemore than one match this an error
Error('Multiple keyword matches in parameter file!')
elif len(value_matches)>0: # if we have one match, change the value
old_value = value_matches[0]
new_value = sys.argv[i+1]
values [index_to_array] = new_value
print 'Changed keyword %s from %s to %s' % (keyword, old_value, new_value)'''
|
gpl-2.0
|
sanketloke/scikit-learn
|
examples/gaussian_process/plot_gpc_xor.py
|
104
|
2132
|
"""
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
spectralliaisons/Spectrum
|
Spectrum.py
|
1
|
14846
|
# The MIT License (MIT)
# Copyright (c) 2013 Wesley Jackson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
############################
# Wes Jackson
# [email protected]
# Dec 2012
# Spectral analysis for many types of audio file
# All input converted to mono wav, as analysis may differ depending on file format
# USAGE:
# import Spectrum as s
# a = s.Analyze('sound/kombucut.wav', maxFreq=8)
# a.plot()
# TODO:
# [] Credit sources where I found useful code
# [] Sounds are converted to mono wav but sometimes the frequencies determined are off by a factor of 0.5X. Why is this?
# Requirements: pydub
############################
import math
import wave
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.ticker import FormatStrFormatter
import matplotlib.axis as Axis
import warnings
import struct
from pydub import AudioSegment as AS # for converting files to standard format
class Analyze:
def __init__(self,
fileName,
maxFreq=12., #kHz
windowSize=2048,
zeroPad=False,
window='hanning'):
self.window = window
self.doZeroPad = zeroPad
self.fMax = maxFreq
self.windowSize = windowSize
# convert to mono wav
self.fileName = self.exportMonoWav(fileName)
print self.fileName
# fft
self.analyze()
#########################
# Spectral Analysis
#########################
def analyze(self):
# clear & configure based on file format
self.clear()
self.configure()
# grab some audio frames
self.updateBuf()
while len(self.buf) == self.windowSize*self.swidth:
self.fftBuf()
self.updateBuf()
# combine analyses across frames
self.generalAnalysis()
def updateBuf(self):
self.buf = self.audata.readframes(self.windowSize)
def clear(self):
self.allF0 = []
self.allEnergy = []
self.allCentroid = []
self.allSkewness = []
self.allKurtosis = []
self.FFT = []
self.freqRange = []
def configure(self):
self.audata = wave.open(self.fileName, 'rb') # audioread.audio_open(self.fileName)
self.swidth = self.audata.getsampwidth() # 2 # 16-bit ^^ CHECK THIS IT WAS 2
self.schannels = self.audata.getnchannels() # self.audata.channels
self.srate = self.audata.getframerate() # self.audata.samplerate
# for zero-padding, take fft size next power of 2 above window size
self.paddedFFTSize = int(2**(1+np.ceil(np.log2(2*self.windowSize))))
print "Width, Channels, Rate, windowSize %d, %d, %d, %d" % (self.swidth, self.schannels, self.srate, self.windowSize)
#print "windowSize, paddedFFTSize: %d, %d" % (self.windowSize, self.paddedFFTSize)
#####################################################
# Run FFT for a chunk of audio and compile analysis
#####################################################
def fftBuf(self):
### 1. DECODE SAMPLES BUFFER
fmt = "%dh" % int(len(self.buf)/self.swidth)
#print "fmt size %d, %d" % (struct.calcsize(fmt), len(self.buf))
indata = np.array(struct.unpack(fmt, self.buf))
### 2. WINDOW
indata = self.applyWindow(indata, self.window)
# width in kHz of e/a frequency bin
self.binSize = (self.srate/(len(indata)/float(self.swidth)))/1000.
### 3. FFT
if self.doZeroPad:
realFFT = abs(np.fft.rfft(indata, n=self.paddedFFTSize))**2.
else:
realFFT = abs(np.fft.rfft(indata))**2.
### 4. Filter
realFFT = realFFT[0:self.freqToIndex(self.fMax)]
self.FFT.append(realFFT)
self.updateAnalysis(realFFT)
def updateAnalysis(self, fft):
## Calculate Energy
energy = fft.sum()
self.allEnergy.append(energy)
### Calculate Fundamental Freq
f0 = self.calculateF0(fft)
self.allF0.append(f0)
### Calculate Spectral Centroid
centroid = self.calculateSpectralCentroid(fft)
self.allCentroid.append(centroid)
### Calculate Skewness
skewness = stats.skew(fft)
self.allSkewness.append(skewness)
### Calculate Kurtosis
kurtosis = stats.kurtosis(fft)
self.allKurtosis.append(kurtosis)
def generalAnalysis(self):
self.loseLast();
# General Analysis for whole sound
# normalize energy across entire sound
self.allEnergy = self.allEnergy/np.amax(self.allEnergy)
# Unweighted mean across entire sound file
self.MeanF0 = np.mean(self.allF0)
self.MeanEnergy = np.mean(self.allEnergy)
self.MeanSpectralCentroid = np.mean(self.allCentroid)
self.MeanSkewness = np.mean(self.allSkewness)
self.MeanKurtosis = np.mean(self.allKurtosis)
### !!! these values change depending on whether fft is normalized
# weighted mean by energy for e/a chunk
self.WeightedF0 = np.average(self.allF0, weights=self.allEnergy)
self.WeightedSpectralCentroid = np.average(self.allCentroid, weights=self.allEnergy)
self.WeightedSkewness = np.average(self.allSkewness, weights=self.allEnergy)
self.WeightedKurtosis = np.average(self.allKurtosis, weights=self.allEnergy)
#print "Done!"
#print "Unweighted, weighted mean:"
print ">> F0: %f, %f" % (self.MeanF0, self.WeightedF0)
#print ">> Energy: %f" % self.MeanEnergy
print ">> Spectral Centroid: %f, %f" % (self.MeanSpectralCentroid, self.WeightedSpectralCentroid)
#print ">> Skewness: %d, %d" % (self.MeanSkewness, self.WeightedSkewness)
#print ">> Kurtosis: %d, %d" % (self.MeanKurtosis, self.WeightedKurtosis)
#########################
# Utils & Stats
#########################
def getFileType(self, str):
return str[str.index('.')+1:]
def getFileName(self, str):
return str[0:str.index('.')]
# Ensure standard format
def exportMonoWav(self, fileName):
ext = self.getFileType(fileName)
if ext == 'wav':
pre = AS.from_wav(fileName)
elif ext == 'mp3':
pre = AS.from_mp3(fileName)
elif ext == 'ogg':
pre = AS.from_ogg(fileName)
elif ext == 'flv':
pre = AS.from_flv(fileName)
else:
pre = AS.from_file(fileName)
# set mono &
pre = pre.set_channels(1)
#pre = pre.set_frame_rate(22050)
fout = self.getFileName(fileName) + '_AS_MONO_WAV_44100.wav'
pre.export(fout, format='wav')
return fout
def applyWindow(self, samples, window='hanning'):
if window == 'bartlett':
return samples*np.bartlett(len(samples))
elif window == 'blackman':
return samples*np.blackman(len(samples))
elif window == 'hamming':
return samples*np.hamming(len(samples))
elif window == 'kaiser':
return samples*np.kaiser(len(samples), 14) # np.kaiser requires a beta parameter; 14 is an assumed, commonly used value
else:
return samples*np.hanning(len(samples))
def loseLast(self):
# Ignore last chunk since it has fewer bins
self.allF0 = self.allF0[0:len(self.allF0)-2]
self.allEnergy = self.allEnergy[0:len(self.allEnergy)-2]
self.allCentroid = self.allCentroid[0:len(self.allCentroid)-2]
self.allSkewness = self.allSkewness[0:len(self.allSkewness)-2]
self.allKurtosis = self.allKurtosis[0:len(self.allKurtosis)-2]
self.FFT = self.FFT[0:len(self.FFT)-2]
# Convert fft bin index to its corresponding frequency
def indexToFreq(self, index):
return index*float(self.binSize)
def freqToIndex(self, freq):
return freq/self.binSize
def calculateF0(self, fft):
freq = float('nan')
f0Index = fft[1:].argmax()+1 # find maximum-energy bin
# interpolate around max-energy freq unless f0 is the last bin :/
if f0Index != len(fft)-1:
y0, y1, y2 = np.log(fft[f0Index-1:f0Index+2:])
x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
freq = self.indexToFreq(f0Index + x1)
else:
freq = self.indexToFreq(f0Index)
return freq
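# Note on the block above (editorial comment): y0, y1, y2 are the log magnitudes of the bins
# around the peak, and x1 = (y2 - y0) * 0.5 / (2*y1 - y2 - y0) is the fractional offset of the
# vertex of the parabola fitted through them (standard quadratic peak interpolation), so the
# reported F0 can fall between FFT bins.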
def calculateSpectralCentroid(self, fft):
centroidIndex = np.sum((1+np.arange(len(fft)))*fft)/float(fft.sum()) # +1 so index 0 isn't 0
return self.indexToFreq(centroidIndex-1)
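# Editorial note: the centroid is the energy-weighted mean bin index, computed with indices
# shifted by +1 so that bin 0 still contributes, then shifted back and converted to kHz.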
def dB(self, a, b):
return 10. * np.log10(a/b)
# 1.567 -> 1.6
def round(n):
return np.round(n*10)/10. # np.round avoids infinite recursion into this shadowing helper
#########################
# Visualization
#########################
### Overlay audio frame spectrograms:
### 1. Linear freq by energy
### 2. Log freq by dB
### 3. Log freq by amount of change between audio frames
def plot(self, xMin='NaN', xMax='NaN', dBMin=-90):
plt.figure()
plt.suptitle(self.fileName + ': F0: ' + str(int(self.WeightedF0)) + ', Centroid: ' + str(int(self.WeightedSpectralCentroid)))
#########################
# 1. Linear energy scale
#########################
linPlot = plt.subplot(311)
if xMin == 'NaN':
xMin = self.WeightedF0 - 0.05
if xMax == 'NaN':
xMax = self.fMax
# x-Axis as frequency
fs = []
for f in range(int(self.freqToIndex(self.fMax))):
fs.append(self.indexToFreq(f))
# y-Axis as normalized energy
for fft in self.FFT:
ys = fft/np.amax(self.FFT)
plt.plot(fs, ys, linewidth=2, color='black')
plt.fill_between(fs, ys, facecolor='green', alpha=0.5)
# plot centroid & fundamental freq
f0 = plt.plot([self.WeightedF0], [self.MeanEnergy], 'b^')
cent = plt.plot([self.WeightedSpectralCentroid], [self.MeanEnergy], 'ro')
plt.setp(f0, 'markersize', 12.0, 'markeredgewidth', 2.0)
plt.setp(cent, 'markersize', 12.0, 'markeredgewidth', 2.0)
plt.title('All Audio Frames: Linear')
#plt.text(0, 1, 'F0: ' + str(int(self.WeightedF0)) + ' Centroid: ' + str(int(self.WeightedSpectralCentroid)))
plt.grid(True)
#plt.xlabel('Frequency')
plt.ylabel('Energy')
plt.axis([xMin, xMax, 0, 1])
linPlot.xaxis.set_major_formatter(FormatStrFormatter('%.01f'))
linPlot.xaxis.set_minor_formatter(FormatStrFormatter('%.01f'))
#########################
# 2. dB energy scale
#########################
dBPlot = plt.subplot(312)
# x-Axis as frequency
fs = []
for f in range(int(self.freqToIndex(self.fMax))):
fs.append(self.indexToFreq(f))
#mdB = self.dB(self.MeanEnergy, np.amax(self.FFT))
alldBs = []
# y-Axis as normalized energy
for fft in self.FFT:
#ys = fft/np.amax(self.FFT)
dBs = []
for i in fft:
dB = max(dBMin, self.dB(i, np.amax(self.FFT)))
dBs.append(dB)
alldBs.append(dBs)
#dBPlot.plot(fs, dBs, linewidth=2, color='black')
plt.semilogx(fs, dBs, linewidth=2, color='black')
plt.fill_between(fs, dBs, dBMin, facecolor='green', alpha=0.3)
mindB = np.amin(alldBs)
mdB = np.mean(alldBs)
# plot centroid & fundamental freq
f0 = dBPlot.plot([self.WeightedF0], [mdB], 'b^')
cent = dBPlot.plot([self.WeightedSpectralCentroid], [mdB], 'ro')
plt.setp(f0, 'markersize', 12.0, 'markeredgewidth', 2.0)
plt.setp(cent, 'markersize', 12.0, 'markeredgewidth', 2.0)
plt.title('All Audio Frames: dB')
plt.grid(True)
#plt.xlabel('Frequency')
plt.ylabel('dB')
plt.axis([xMin, xMax, mindB, 0])
#plt.xscale('log')
dBPlot.xaxis.set_major_formatter(FormatStrFormatter('%.01f'))
dBPlot.xaxis.set_minor_formatter(FormatStrFormatter('%.01f'))
#########################
# 3. Spectral change as stdev of dB values for a freq bin across audio frames
# Use dB since much more energy at low freqs means higher stdev
#########################
devPlot = plt.subplot(313)
# e/a freq bin as array of energy in e/a frame
numBins = len(self.FFT[0])
numFrames = len(self.FFT)
allBins = np.zeros((numBins, numFrames)) # float arrays so the dB values below are not truncated to ints
binDev = np.zeros(numBins)
for bin in range(numBins):
for frame in range(numFrames):
allBins[bin][frame] = self.dB(self.FFT[frame][bin], np.amax(self.FFT))
binDev[bin] = np.std(allBins[bin])
#normalize
#binDev = binDev/float(np.amax(binDev))
plt.semilogx(fs, binDev, linewidth=2, color='black')
plt.fill_between(fs, binDev, facecolor='red', alpha=0.5)
# plot centroid & fundamental freq
f0 = plt.plot([self.WeightedF0], [min(binDev)+5], 'b^')
cent = plt.plot([self.WeightedSpectralCentroid], [min(binDev)+5], 'ro')
plt.setp(f0, 'markersize', 12.0, 'markeredgewidth', 2.0)
plt.setp(cent, 'markersize', 12.0, 'markeredgewidth', 2.0)
plt.title('Spectral Deviation Across Audio Frames')
plt.grid(True)
plt.xlabel('Frequency (kHz)')
plt.ylabel('STD (dB)')
plt.axis([xMin, xMax, min(binDev), max(binDev)])
devPlot.xaxis.set_major_formatter(FormatStrFormatter('%.01f'))
devPlot.xaxis.set_minor_formatter(FormatStrFormatter('%.01f'))
plt.show()
|
mit
|
trovdimi/wikilinks
|
HypTrails.py
|
1
|
6728
|
# further implementations can be found:
# Python: https://github.com/psinger/hyptrails
# Java: https://bitbucket.org/florian_lemmerich/hyptrails4j
# Apache spark: http://dmir.org/sparktrails/
# also see: http://www.philippsinger.info/hyptrails/
from __future__ import division
import itertools
from scipy.sparse import csr_matrix
from scipy.special import gammaln
from collections import defaultdict
from sklearn.preprocessing import normalize
from scipy.sparse.sparsetools import csr_scale_rows
import numpy as np
class HypTrails():
"""
HypTrails
"""
def __init__(self, vocab=None):
"""
Constructor for class HypTrails
Args:
vocab: optional vocabulary mapping states to indices
"""
self.vocab = vocab
self.state_count = len(vocab)
def fit(self, transitions_matrix):
"""
Function for fitting the Markov Chain model given data
Args:
transitions_matrix: sparse (csr) matrix of observed transition counts
"""
self.transitions = transitions_matrix
#print "fit done"
def evidence(self, hypothesis, structur, k=1, prior=1., norm=True):
"""
Determines Bayesian evidence given fitted model and hypothesis
Args:
hypothesis: Hypothesis csr matrix,
indices need to map those of transition matrix
k: Concentration parameter k
prior: proto Dirichlet prior
norm: Flag for normalizing hypothesis matrix
Returns
evidence
"""
# care with copy here
hypothesis = csr_matrix(hypothesis, copy=True)
structur = csr_matrix(structur, copy=True)
pseudo_counts = k * self.state_count
if hypothesis.size != 0:
# in case of memory issues set copy to False but then care about changed hypothesis matrix
if norm == True:
#print "in norm"
norm_h = hypothesis.sum(axis=1)
n_nzeros = np.where(norm_h > 0)
norm_h[n_nzeros] = 1.0 / norm_h[n_nzeros]
norm_h = np.array(norm_h).T[0]
#print "in place mod"
# modify sparse_csc_matrix in place
csr_scale_rows(hypothesis.shape[0],
hypothesis.shape[1],
hypothesis.indptr,
hypothesis.indices,
hypothesis.data, norm_h)
# distribute pseudo counts to matrix, row-based approach
hypothesis = hypothesis * pseudo_counts
#print "after pseude counts"
# also consider those rows which only include zeros
norma = hypothesis.sum(axis=1)
n_zeros,_ = np.where(norma == 0)
hypothesis[n_zeros,:] = pseudo_counts / self.state_count
else:
#print "in norm"
norm_h = hypothesis.sum(axis=1)
n_nzeros = np.where(norm_h > 0)
norm_h[n_nzeros] = 1.0 / norm_h[n_nzeros]
norm_h = np.array(norm_h).T[0]
#print "in place mod"
# modify sparse_csc_matrix in place
csr_scale_rows(hypothesis.shape[0],
hypothesis.shape[1],
hypothesis.indptr,
hypothesis.indices,
hypothesis.data, norm_h)
# distribute pseudo counts to matrix, row-based approach
#TODO check if this line should be placed after the zero_rows_norm() call????
hypothesis = hypothesis * pseudo_counts
#self.zero_rows_norm(hypothesis, structur,k)
self.zero_rows_norm_eff1(hypothesis, structur, k)
else:
# if hypothesis matrix is empty, we can simply increase the proto prior parameter
prior += k
# transition matrix with additional Dirichlet prior
# not memory efficient
transitions_prior = self.transitions.copy()
transitions_prior = transitions_prior + hypothesis
#print "after copy"
# elegantly calculate evidence
evidence = 0
evidence += gammaln(hypothesis.sum(axis=1)+self.state_count*prior).sum()
evidence -= gammaln(self.transitions.sum(axis=1)+hypothesis.sum(axis=1)+self.state_count*prior).sum()
evidence += gammaln(transitions_prior.data+prior).sum()
evidence -= gammaln(hypothesis.data+prior).sum() + (len(transitions_prior.data)-len(hypothesis.data)) * gammaln(prior)
return evidence
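# Editorial reading of the computation above (not from the original authors): with pseudo-counts
# alpha_ij = hypothesis_ij + prior and observed counts n_ij, the four gammaln terms sum to the
# log Dirichlet-multinomial marginal likelihood
#   sum_i [ gammaln(sum_j alpha_ij) - gammaln(sum_j (n_ij + alpha_ij)) ]
#     + sum_ij [ gammaln(n_ij + alpha_ij) - gammaln(alpha_ij) ],
# where entries that are zero in both matrices cancel via the final gammaln(prior) correction.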
def zero_rows_norm(self, hypothesis, structur,k):
norma = hypothesis.sum(axis=1)
n_zeros = np.where(norma == 0)
print 'n_zeros'
print len(n_zeros[0])
for x, i in enumerate(n_zeros[0]):
if x % 1000 == 0:
print x, len(n_zeros[0])
links = np.where(structur[i,:]!=0)
hypothesis[i,links[0]] = k / len(links[0])
print 'n_zeros done'
# def zero_rows_norm_eff(self,hypothesis, structur):
# #find zero sum rows in hypothesis
# print 'sum hyp'
# norma = hypothesis.sum(axis=1)
# n_zeros = np.where(norma == 0)
# # norm the structure matrix
# print 'sum structure'
# tmp = structur[n_zeros]
# norm_s = tmp.sum(axis=1)
# norm_s = np.array(norm_s).T[0]
# tmp = tmp/norm_s[:,None]
# #replece the zero rows in hypothesis with the corresponding rows in the normed strcuture matrix
# print 'replace'
# hypotheis[n_zeros,:]=tmp[n_zeros,:]
def zero_rows_norm_eff1(self,hypothesis, structur, k):
#find zero sum rows in hypothesis
#print 'sum hyp'
norma = hypothesis.sum(axis=1)
n_zeros = np.where(norma == 0)
# norm the structure matrix
i_index = list()
j_index = list()
values = list()
for x, i in enumerate(n_zeros[0]):
#if x % 1000 == 0:
# print x, len(n_zeros[0])
links = np.where(structur[i,:]!=0)
value = k / len(links[0])
for j in links[0]:
i_index.append(i)
j_index.append(j)
values.append(value)
hypothesis= hypothesis+csr_matrix((values, (i_index, j_index)),
shape=hypothesis.shape, dtype=np.float)
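# Minimal usage sketch (hypothetical data; all matrices must share the same state indexing):
# vocab = {'A': 0, 'B': 1}
# transitions = csr_matrix(np.array([[0., 3.], [1., 0.]])) # observed transition counts
# hypothesis = csr_matrix(np.array([[0., 1.], [1., 0.]])) # belief to compare against the data
# structure = csr_matrix(np.ones((2, 2))) # allowed transitions
# ht = HypTrails(vocab)
# ht.fit(transitions)
# print ht.evidence(hypothesis, structure, k=5)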
|
mit
|
vagner-fisica/pplot
|
pplot.py
|
1
|
5661
|
from pylab import *
import matplotlib.pyplot as plt
import os, trim
doc = open('documentation.txt','r').read()
def lzeros(nzeros, idx):
s = ("%0" + str(nzeros) + "d") % (idx)
return s
class pplot:
#----------------------
"""pplot class: developed by Vagner Bessa. Contact: [email protected]. For more information, use 'showDoc()' method or saveDoc() and check documentation.txt file."""
#----------------------
dataFolder = "dataFolder"
plotsFolder = "plotsFolder"
cwd = os.getcwd()
cwdls = os.listdir(cwd)
xDimPLot = 8
yDimPlot = xDimPLot
fig = plt.figure(figsize = (xDimPLot,yDimPlot), dpi = 80)
frameDim = [0.1,.1,.8,.8]
pXY = fig.add_axes(frameDim)
yLabel = r'$y(x)$'
xLabel = r'$x$'
dataname = 'data.dat'
plotname = 'plot.png'
X = []
Y = []
Z = []
XY = []
XYZ = []
matrix = []
colourPallete = {'colour': '#025167',\
'pblue':'#624cb8',\
'pred':'#bf5272',\
'pyellow':'#afc558',\
'pgreen' : '#5fcbab'\
}
#----------------------
def __init__(self, dFolder = "dataFolder", pFolder = "plotsFolder"):
"""Create the folder to work in.
"""
if type(dFolder) is str:
pth = os.path.join(self.cwd,dFolder)
if os.path.exists(pth):
print 'WARNING: Folder \'%s\' already exists.' %(dFolder)
else:
os.mkdir(dFolder)
self.dataFolder = dFolder
else:
pth = os.path.join(self.cwd,self.dataFolder)
if os.path.exists(pth):
print 'WARNING: Folder \'%s\' already exists.' %(self.dataFolder)
else:
os.mkdir(self.dataFolder)
if type(pFolder) is str:
pth = os.path.join(self.cwd,pFolder)
if os.path.exists(pth):
print 'WARNING: Folder \'%s\' already exists.' %(pFolder)
else:
os.mkdir(pFolder)
self.plotsFolder = pFolder
else:
pth = os.path.join(self.cwd,self.plotsFolder)
if os.path.exists(pth):
print 'WARNING: Folder \'%s\' already exists.' %(self.plotsFolder)
else:
os.mkdir(self.plotsFolder)
#----------------------
def plotfname(self,f):
"""Return path to file 'f' in folder pplot.plotsFolder
"""
f = os.path.join(self.plotsFolder,f)
return f
#----------------------
def datafname(self,f):
"""Return path to file 'f' in folder pplot.dataFolder
"""
f = os.path.join(self.dataFolder,f)
return f
#----------------------
def saveData(self,dataname,data):
"""Store data in file in pplot standar format:
x11 y12 z13 ...
x21 y22 z23 ...
x31 y32 z33 ...
.
.
.
"""
np.savetxt(dataname,data,delimiter = '\t',newline='\n')
#----------------------
def loadXYdata(self,fName):
"""A 2D array of data is loaded into member XY.
"""
try:
self.XY = np.loadtxt(open(fName))
except:
print 'ERROR: failed reading file \'%s\'. Make '\
'sure the columns are tab-separated.'\
%(fName)
#----------------------
def makeXYFrame(self,x,y):
"""Default frame for 2D-XY plot.
"""
from math import floor, ceil
xlim(x.min(),x.max())
ylim(y.min(),y.max())
xini = int(ceil(x.min()))
xend = int(floor(x.max()))
yini = int(ceil(y.min()))
yend = int(floor(y.max()))
xticks = linspace(xini,xend,5,endpoint=True)
yticks = linspace(yini,yend,5,endpoint=True)
self.pXY.set_xticks(xticks)
self.pXY.set_yticks(yticks)
self.pXY.set_xlabel(self.xLabel, fontsize = 18)
self.pXY.set_ylabel(self.yLabel, fontsize = 18)
#----------------------
def plotxy(self):
self.pXY.plot(self.X, self.Y, label = self.yLabel, color = self.colourPallete['colour'], lw = 2.0)
self.makeXYFrame(self.X,self.Y)
#----------------------
def sampleplotXY(self):
"""Plot with a sample data: y(x) = cos(x) in range [-2Pi,2Pi].
"""
self.dataname = self.datafname('sampledataXY.dat')
self.plotname = self.plotfname('sampleplotXY.png')
print "Ploting with \'%s\'" %(self.dataname)
self.X = linspace(-2*pi,2*pi,100,endpoint=True)
self.Y = cos(self.X)
self.XY = np.array(zip(self.X, self.Y)) # drop the undefined dtype argument
self.saveData(self.dataname,self.XY)
self.pXY.set_title('Sample data')
self.plotxy()
self.fig.savefig(self.plotname, dpi = 80)
print "Plot saved at \'%s\'" %(self.plotname)
#----------------------
def plotXY(self,fName = None,title = None,SHOW = False,SAVE = False):
"""If no data is supplied, the sampleplotXY() method is executed.
"""
        if fName is None:
            self.sampleplotXY()
            if SHOW:
                show()
        elif os.path.isfile(fName):
            self.loadXYdata(fName)
            xy = np.column_stack(self.XY)
            self.X = xy[0]
            self.Y = xy[1]
            self.plotxy()
            if SHOW:
                show()
            if type(title) is str:
                self.pXY.set_title(title)
            if SAVE:
                self.fig.savefig(self.plotname, dpi = 80)
        else:
            print 'ERROR: file \'%s\' not found. Make sure you\'ve supplied the full path or the file is in the current working dir:\n\'%s\'' %(fName,self.cwd)
#----------------------
    def showDoc(self, f = None):
        """Print pplot's doc string, or the doc string passed in 'f'.
        Useful to print <methodname>.__doc__.
        Example:
            'myplot.showDoc(myplot.showDoc.__doc__)'
        shows this very doc string without \\t, \\n or indentation.
        """
        if f is None:
            f = self.__doc__
        print trim.trim(f)
    #----------------------
    def saveDoc(self):
        """Write pplot's doc string to documentation.txt.
        """
        out = open("documentation.txt","w")
        out.write(trim.trim(self.__doc__))
        out.close()
#a = pplot()
#a.plotXY("Free_Field.dat",SHOW = True)
#a.showDoc(a.showDoc.__doc__)
|
mit
|
hitszxp/scikit-learn
|
examples/applications/plot_stock_market.py
|
29
|
8284
|
"""
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WAG': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
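# GraphLassoCV cross-validates the l1 penalty of the graphical lasso, yielding
# a sparse precision (inverse covariance) matrix.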
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
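# Rescaling the precision matrix by 1/sqrt(diag) on both sides gives the
# partial correlations (up to a sign flip on the off-diagonal entries); only
# their absolute values are used below to select and weight the edges.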
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
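# Edge color and line width encode the absolute value of the partial
# correlation between the two connected stocks.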
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
|
bsd-3-clause
|
rfinn/LCS
|
python/display_galfit.py
|
1
|
3215
|
#!/usr/bin/env python
import os
from astropy.io import fits
from matplotlib import pyplot as plt
from astropy.visualization import simple_norm
from astropy import units as u
from astropy.wcs import WCS
homedir = os.getenv("HOME")
from scipy.stats import scoreatpercentile
os.sys.path.append(homedir+'/github/virgowise/')
import rungalfit as rg #This code has all the defined functions that I can use
def display_galfit_model(image,percentile1=.5,percentile2=99.5,p1residual=5,p2residual=99,cmap='viridis',zoom=None):
    '''
    ARGS:
    image = filename of the galfit output FITS image (image, model and
            residual are read from extensions 1, 2 and 3)
    percentile1 = min percentile for stretch of image and model
    percentile2 = max percentile for stretch of image and model
    p1residual = min percentile for stretch of residual
    p2residual = max percentile for stretch of residual
    cmap = colormap, default is viridis
    zoom = optional zoom factor; if given, only the central region of each
           panel (image size / zoom) is displayed
    '''
# model name
filename = image
pngname = image.split('.fits')[0]+'.png'
image,h = fits.getdata(filename,1,header=True)
model = fits.getdata(filename,2)
residual = fits.getdata(filename,3)
if zoom is not None:
print("who's zoomin' who?")
# display central region of image
# figure out how to zoom
# get image dimensions and center
xmax,ymax = image.shape
xcenter = int(xmax/2)
ycenter = int(ymax/2)
# calculate new size to display based on zoom factor
new_xradius = int(xmax/2/(float(zoom)))
new_yradius = int(ymax/2/(float(zoom)))
# calculate pixels to keep based on zoom factor
x1 = xcenter - new_xradius
x2 = xcenter + new_xradius
y1 = ycenter - new_yradius
y2 = ycenter + new_yradius
# check to make sure limits are not outsize image dimensions
if (x1 < 1):
x1 = 1
if (y1 < 1):
y1 = 1
if (x2 > xmax):
x2 = xmax
if (y2 > ymax):
y2 = ymax
# cut images to new size
image = image[x1:x2,y1:y2]
model = model[x1:x2,y1:y2]
residual = residual[x1:x2,y1:y2]
wcs = WCS(h)
images = [image,model,residual]
titles = ['image','model','residual']
v1 = [scoreatpercentile(image,percentile1),
scoreatpercentile(image,percentile1),
scoreatpercentile(residual,p1residual)]
v2 = [scoreatpercentile(image,percentile2),
scoreatpercentile(image,percentile2),
scoreatpercentile(residual,p2residual)]
norms = [simple_norm(image,'asinh',max_percent=percentile2),
simple_norm(image,'asinh',max_percent=percentile2),
simple_norm(residual,'linear',max_percent=p2residual)]
plt.figure(figsize=(14,6))
plt.subplots_adjust(wspace=.0)
for i,im in enumerate(images):
plt.subplot(1,3,i+1,projection=wcs)
plt.imshow(im,origin='lower',cmap=cmap,vmin=v1[i],vmax=v2[i],norm=norms[i])
plt.xlabel('RA')
if i == 0:
plt.ylabel('DEC')
else:
ax = plt.gca()
ax.set_yticks([])
plt.title(titles[i],fontsize=16)
plt.savefig(pngname)
def print_galfit_model(image):
t = rg.parse_galfit_1comp(image,printflag=True)
#print(t)
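# Example usage (the filename is illustrative): display_galfit_model('galfit_output.fits', zoom=2)
# expects a GALFIT output FITS file with image, model and residual in extensions 1-3.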
|
gpl-3.0
|
SU-ECE-17-7/ibeis
|
ibeis/algo/hots/smk/smk_scoring.py
|
1
|
12264
|
# -*- coding: utf-8 -*-
"""
The functions for scoring smk matches
"""
from __future__ import absolute_import, division, print_function
import utool
#import pandas as pd
import numpy as np
#import scipy.sparse as spsparse
#from ibeis.algo.hots import hstypes
from ibeis.algo.hots import hstypes
from six.moves import zip
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[smk_scoring]')
DEBUG_SMK = utool.DEBUG2 or utool.get_argflag('--debug-smk')
@profile
def sccw_summation(rvecs_list, flags_list, idf_list, maws_list, smk_alpha, smk_thresh):
r"""
Computes gamma from "To Aggregate or not to aggregate". Every component in
    each list is with respect to a different word.
scc = self consistency criterion
    It is a scalar which ensures K(X, X) = 1
Args:
rvecs_list (list of ndarrays): residual vectors for every word
idf_list (list of floats): idf weight for each word
maws_list (list of ndarrays): multi-assign weights for each word for each residual vector
smk_alpha (float): selectivity power
smk_thresh (float): selectivity threshold
Returns:
float: sccw self-consistency-criterion weight
Math:
\begin{equation}
        \gamma(X) = \left(\sum_{c \in \mathcal{C}} w_c M(X_c, X_c)\right)^{-0.5}
\end{equation}
Example:
>>> from ibeis.algo.hots.smk.smk_scoring import * # NOQA
>>> from ibeis.algo.hots.smk import smk_scoring
>>> from ibeis.algo.hots.smk import smk_debug
>>> #idf_list, rvecs_list, maws_list, smk_alpha, smk_thresh, wx2_flags = smk_debug.testdata_sccw_sum(db='testdb1')
>>> tup = smk_debug.testdata_sccw_sum(db='PZ_MTEST', nWords=128000)
>>> idf_list, rvecs_list, flags_list, maws_list, smk_alpha, smk_thresh = tup
>>> sccw = smk_scoring.sccw_summation(rvecs_list, flags_list, idf_list, maws_list, smk_alpha, smk_thresh)
>>> print(sccw)
0.0201041835751
CommandLine:
python smk_match.py --db PZ_MOTHERS --nWords 128
Ignore:
0.0384477314197
qmaws_list = dmaws_list = maws_list
drvecs_list = qrvecs_list = rvecs_list
dflags_list = qflags_list = flags_list
flags_list = flags_list[7:10]
maws_list = maws_list[7:10]
idf_list = idf_list[7:10]
rvecs_list = rvecs_list[7:10]
"""
num_rvecs = len(rvecs_list)
if DEBUG_SMK:
assert maws_list is None or len(maws_list) == num_rvecs, 'inconsistent lengths'
assert num_rvecs == len(idf_list), 'inconsistent lengths'
assert maws_list is None or list(map(len, maws_list)) == list(map(len, rvecs_list)), 'inconsistent per word lengths'
assert flags_list is None or list(map(len, maws_list)) == list(map(len, flags_list)), 'inconsistent per word lengths'
assert flags_list is None or len(flags_list) == num_rvecs, 'inconsistent lengths'
    # Indexing with asymmetric multi-assignment might get you a non-1 self score?
# List of scores for every word.
scores_list = score_matches(rvecs_list, rvecs_list, flags_list, flags_list,
maws_list, maws_list, smk_alpha, smk_thresh,
idf_list)
if DEBUG_SMK:
assert len(scores_list) == num_rvecs, 'bad rvec and score'
assert len(idf_list) == len(scores_list), 'bad weight and score'
# Summation over all residual vector scores
_count = sum((scores.size for scores in scores_list))
_iter = utool.iflatten(scores.ravel() for scores in scores_list)
self_rawscore = np.fromiter(_iter, np.float64, _count).sum()
# Square root inverse to enforce normalized self-score is 1.0
sccw = np.reciprocal(np.sqrt(self_rawscore))
try:
assert not np.isinf(sccw), 'sccw cannot be infinite'
assert not np.isnan(sccw), 'sccw cannot be nan'
except AssertionError as ex:
utool.printex(ex, 'problem computing self consistency criterion weight',
keys=['num_rvecs'], iswarning=True)
if num_rvecs > 0:
raise
else:
sccw = 1
return sccw
@profile
def score_matches(qrvecs_list, drvecs_list, qflags_list, dflags_list,
qmaws_list, dmaws_list, smk_alpha, smk_thresh, idf_list):
"""
Similarity + Selectivity: M(X_c, Y_c)
Computes the similarity matrix between word correspondences
Args:
qrvecs_list : query vectors for each word
drvecs_list : database vectors for each word
qmaws_list : multi assigned weights for each query word
dmaws_list : multi assigned weights for each database word
smk_alpha : selectivity power
smk_thresh : selectivity smk_thresh
Returns:
list : list of score matrices
References:
https://lear.inrialpes.fr/~douze/enseignement/2013-2014/presentation_papers/tolias_aggregate.pdf
Example:
>>> from ibeis.algo.hots.smk.smk_scoring import * # NOQA
>>> from ibeis.algo.hots.smk import smk_debug
>>> smk_alpha = 3
>>> smk_thresh = 0
>>> qrvecs_list = [smk_debug.get_test_rvecs(_) for _ in range(10)]
>>> drvecs_list = [smk_debug.get_test_rvecs(_) for _ in range(10)]
>>> qmaws_list = [smk_debug.get_test_maws(rvecs) for rvecs in qrvecs_list]
>>> dmaws_list = [np.ones(rvecs.shape[0], dtype=hstypes.FLOAT_TYPE) for rvecs in qrvecs_list]
>>> idf_list = [1.0 for _ in qrvecs_list]
        >>> scores_list = score_matches(qrvecs_list, drvecs_list, None, None, qmaws_list, dmaws_list, smk_alpha, smk_thresh, idf_list)
"""
# Cosine similarity between normalized residuals
simmat_list = similarity_function(qrvecs_list, drvecs_list, qflags_list, dflags_list)
# Apply sigma selectivity (power law) (BEFORE WEIGHTING)
scoremat_list = selectivity_function(simmat_list, smk_alpha, smk_thresh)
# Apply Weights (AFTER SELECTIVITY)
wscoremat_list = apply_weights(scoremat_list, qmaws_list, dmaws_list, idf_list)
return wscoremat_list
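# Residual vectors are stored as int8; casting to float32 and dividing by the
# pseudo-max squared rescales the dot products roughly into [-1, 1].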
def rvecs_dot_uint8(qrvecs, drvecs):
return qrvecs.astype(np.float32).dot(drvecs.T.astype(np.float32)) / hstypes.RVEC_PSEUDO_MAX_SQRD
@profile
def similarity_function(qrvecs_list, drvecs_list, qflags_list, dflags_list):
""" Phi dot product.
Args:
qrvecs_list (list): query residual vectors for each matching word
drvecs_list (list): corresponding database residual vectors
qflags_list (list): indicates if a query vector was nan
dflags_list (list): indicates if a database vector was nan
Returns:
simmat_list
qrvecs_list list of rvecs for each word
Example:
>>> from ibeis.algo.hots.smk.smk_scoring import * # NOQA
>>> from ibeis.algo.hots.smk import smk_debug
>>> qrvecs_list, drvecs_list = smk_debug.testdata_similarity_function()
        >>> simmat_list = similarity_function(qrvecs_list, drvecs_list, None, None)
"""
    # For int8: downweight by the pseudo max squared, to get scores between 0 and 1
simmat_list = [
rvecs_dot_uint8(qrvecs, drvecs)
for qrvecs, drvecs in zip(qrvecs_list, drvecs_list)
]
if utool.DEBUG2:
assert len(simmat_list) == len(qrvecs_list), 'bad simmat and qrvec'
assert len(simmat_list) == len(drvecs_list), 'bad simmat and drvec'
if qflags_list is not None and dflags_list is not None:
# Set any scores resulting from flagged vectors to 1
        # Actually let's add .5 because we don't know if a flagged vector
# is a good match, but if both database and query are flagged then
# it must be a good match
for qflags, dflags, simmat in zip(qflags_list, dflags_list, simmat_list):
simmat[qflags] += 0.5
simmat.T[dflags] += 0.5
elif qflags_list is not None:
for qflags, simmat in zip(qflags_list, simmat_list):
simmat[qflags] += 0.5
elif dflags_list is not None:
for dflags, simmat in zip(dflags_list, simmat_list):
simmat.T[dflags] += 0.5
# for float16: just perform the calculation
#simmat_list = [
# qrvecs.dot(drvecs.T)
# for qrvecs, drvecs in zip(qrvecs_list, drvecs_list)
#]
# uint8 does not have nans. We need to use flag lists
#for simmat in simmat_list:
# simmat[np.isnan(simmat)] = 1.0
return simmat_list
@profile
def apply_weights(simmat_list, qmaws_list, dmaws_list, idf_list):
"""
    Applies multi-assign weights and idf weights to rvec similarity matrices
TODO: Maybe should apply the sccw weights too?
Accounts for rvecs being stored as int8's
Example:
>>> from ibeis.algo.hots.smk.smk_scoring import * # NOQA
>>> from ibeis.algo.hots.smk import smk_debug
>>> simmat_list, qmaws_list, dmaws_list, idf_list = smk_debug.testdata_apply_weights()
>>> wsim_list = apply_weights(simmat_list, qmaws_list, dmaws_list, idf_list)
"""
word_weight_list = idf_list
if qmaws_list is None and dmaws_list is None:
wsim_list = [
(word_weight * simmat)
for simmat, word_weight in
zip(simmat_list, word_weight_list)
]
elif qmaws_list is not None and dmaws_list is not None:
wsim_list = [
(((word_weight * qmaws[:, None]) * simmat) * dmaws[None, :])
for simmat, qmaws, dmaws, word_weight in
zip(simmat_list, qmaws_list, dmaws_list, word_weight_list)
]
elif qmaws_list is not None and dmaws_list is None:
wsim_list = [
((word_weight * qmaws[:, None]) * simmat)
for simmat, qmaws, word_weight in
zip(simmat_list, qmaws_list, word_weight_list)
]
else:
raise NotImplementedError('cannot just do dmaws')
return wsim_list
#@profile
@profile
def selectivity_function(wsim_list, smk_alpha, smk_thresh):
""" Selectivity function - sigma from SMK paper rscore = residual score
Downweights weak matches using power law normalization and thresholds
anybody that is too weak
Example:
>>> import numpy as np
>>> from ibeis.algo.hots.smk import smk_debug
>>> smk_debug.rrr()
>>> np.random.seed(0)
>>> wsim_list, smk_alpha, smk_thresh = smk_debug.testdata_selectivity_function()
Timeits:
>>> import utool
>>> utool.util_dev.rrr()
>>> setup = utool.codeblock(
... '''
import numpy as np
import scipy.sparse as spsparse
from ibeis.algo.hots.smk import smk_debug
np.random.seed(0)
wsim_list, smk_alpha, smk_thresh = smk_debug.testdata_selectivity_function()
scores_iter = [
np.multiply(np.sign(mawmat), np.power(np.abs(mawmat), smk_alpha))
for mawmat in wsim_list
]
''')
>>> stmt_list = utool.codeblock(
... '''
scores_list0 = [np.multiply(scores, np.greater(scores, smk_thresh)) for scores in scores_iter]
scores_list1 = [spsparse.coo_matrix(np.multiply(scores, np.greater(scores, smk_thresh))) for scores in scores_iter]
scores_list2 = [spsparse.dok_matrix(np.multiply(scores, np.greater(scores, smk_thresh))) for scores in scores_iter]
scores_list3 = [spsparse.lil_matrix(np.multiply(scores, np.greater(scores, smk_thresh))) for scores in scores_iter]
'''
... ).split('\n')
>>> utool.util_dev.timeit_compare(stmt_list, setup, int(1E4))
scores0 = scores_list0[-1]
scores1 = scores_list1[-1]
scores2 = scores_list2[-1]
scores3 = scores_list3[-1]
%timeit scores0.sum()
%timeit scores1.sum()
%timeit scores2.sum()
%timeit scores3.sum()
"""
# Apply powerlaw
scores_iter = [
np.multiply(np.sign(mawmat), np.power(np.abs(mawmat), smk_alpha))
for mawmat in wsim_list
]
# Apply threshold
scores_list = [
np.multiply(scores, np.greater(scores, smk_thresh))
for scores in scores_iter
]
if utool.DEBUG2:
assert len(scores_list) == len(wsim_list)
return scores_list
|
apache-2.0
|
bundgus/python-playground
|
bokeh-playground/bokeh-flask.py
|
1
|
2483
|
import flask
import pandas as pd
import numpy as np
import blaze as bz
from odo import odo
from bokeh.embed import components
from bokeh.resources import INLINE
from bokeh.templates import RESOURCES
from bokeh.util.string import encode_utf8
from bokeh.models import ColumnDataSource
import bokeh.plotting as plt
app = flask.Flask(__name__)
db = bz.Data('sqlite:///lahman2013.sqlite')
distinct_teams = list(db.Salaries.teamID.distinct())
distinct_years = list(db.Salaries.yearID.distinct())
def create_plot(team="LAA", year=2012):
expr = bz.by(db.Salaries.teamID,
avg=db.Salaries.salary.mean(),
max=db.Salaries.salary.max(),
ratio=db.Salaries.salary.max() / db.Salaries.salary.min())
expr = expr.sort('ratio', ascending=False)
df_salary_gb = odo(expr, pd.DataFrame)
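    # bz.by groups Salaries by teamID and computes the per-team mean salary,
    # max salary and max/min salary ratio; odo materializes the lazy blaze
    # expression into a pandas DataFrame.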
source1 = odo(df_salary_gb[["teamID", "avg"]], ColumnDataSource)
plot1 = plt.figure(title="Salary ratio by team", x_range=list(df_salary_gb["teamID"]))
plot1.scatter(x="teamID", y="avg", source=source1, size=20)
plot1.xaxis.major_label_orientation = np.pi/3
df = odo(db.Salaries, pd.DataFrame)
df = df[df["teamID"] == team]
df = df[df["yearID"] == year]
df = df[["playerID","salary"]].sort('salary')
source_team = odo(df, ColumnDataSource)
p_team = plt.figure(title="Salary of players for %s during %s" % (team, year),
x_range=list(df["playerID"]))#, tools=TOOLS)
p_team.scatter(x="playerID", y="salary", source=source_team, size=20)
p_team.xaxis.major_label_orientation = np.pi/3
p = plt.gridplot([[plot1, p_team]])
return p
@app.route("/")
def index():
args = flask.request.args
selected_team = flask.request.values.get("selected_team", "LAA")
selected_year = int(flask.request.values.get("selected_year", "2012"))
p = create_plot(selected_team, selected_year)
plot_resources = RESOURCES.render(
js_raw=INLINE.js_raw,
css_raw=INLINE.css_raw,
js_files=INLINE.js_files,
css_files=INLINE.css_files,
)
script, div = components(p, INLINE)
html = flask.render_template(
'embed.html',
plot_script=script,
plot_div=div,
plot_resources=plot_resources,
selected_team=selected_team,
selected_year=selected_year,
years=distinct_years,
teams=distinct_teams,
)
return encode_utf8(html)
if __name__ == "__main__":
app.run(debug=True)
|
mit
|
hahnicity/ecs251-final-project
|
test.py
|
1
|
4680
|
# Machine learning script
from sys import argv, exit
import csv
import os
import math
from glob import glob
from pandas import read_csv
import numpy as np
#using keywords data to do classification
#data_p = np.recarray((len(keywords),), names = keywords)--doesn't work
import time
import matplotlib.pyplot as plt
#------------------------------------------------------
#from fitgaus import fitgausdist
#----------------------------------------
def normlize(vec):
    """Z-score normalization: subtract the mean and divide by the standard deviation."""
    average = np.mean(vec)
    stddev = np.std(vec)
    vec = (vec-average)/stddev
    return vec
def get_cohort_files(cohort, keywords):
"""
Read data and get parameters for processing
"""
if cohort not in ["ardscohort", "controlcohort"]:
raise Exception("Input must either be ardscohort or controlcohort")
    path = os.path.join(os.path.dirname(__file__), cohort)
#dirs = os.listdir(path)
dirs = glob(path+'/0*')
data = []
k = 0
for dir in dirs:
path_temp = dir
name = glob(path_temp+'/*.csv')
df = read_csv(name[0])
#print 'Opening file: ', name
k += 1
#for each patient
flag = 0
paras = []
m = 50
a = df[keywords[0]]
print len(a)
for i in range(0, len(keywords)):
a = df[keywords[i]]
if len(a) <= m:
                print name[0], ' is an empty or too short file'
flag = 1
break
#fit to normal distribution
#para = fitgausdist(a)
#mean = np.sum(a)/float(len(a))
#std = np.std(a)
#para = [mean, std]
partial = a[0:m]
para = partial
if math.isnan(para[0]):
print name[0], keywords[i]
flag = 1
break
paras.extend(para)
if flag == 0:
data.append(paras)
k -= flag
return data, k
keywords = ['I:E ratio','eTime','PIP', 'Maw', 'PEEP', 'ipAUC', 'epAUC'] #, 'minF_to_zero'
cohort = "ardscohort"
data_p, k1 = get_cohort_files(cohort, keywords)
print 'get %d files from patients' % k1
cohort = "controlcohort"
temp, k2 = get_cohort_files(cohort, keywords)
print 'get %d files from control group' % k2
labels = np.append(np.ones(k1-3), np.zeros(k2-3))
#data_p = np.transpose(data_p)
#print "Done reading data, get arrary of size", len(data_p)
#from sklearn import preprocessing
#normalized_X = preprocessing.normalize(np.array(data_p))
def normalize(x):
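    # Right-multiplication by the centering matrix I - (1/n)*J subtracts the mean
    # along the last axis; unlike normlize() above, it does not divide by the std.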
x = np.dot(x, np.identity(len(x))-np.ones(len(x))/len(x))
return x
#-------------FDA-----------------
datatrain = []
datatrain.extend(data_p[0:k1-3])
datatrain.extend(temp[0:k2-3])
datatest = data_p[-3:]
datatest.extend(temp[-3:])
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
clf = LinearDiscriminantAnalysis()
clf.fit(datatrain, labels)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
print(clf.predict(datatest))
#should clear that one file
'''
1. some blank files
2. some files missing features
3. some files too short, less than 1000 breath
4. not using the min_f feature... the important one
1. find out the collinear variables -- rank 70 of matrix 76,
2. use other partial data for testing - need to verify validity
3. pull out the classification plot
4. look for more features to use instead of just raw feature
5. break one patient into multiple?
6. kernalize?
1. 300 too long
2. 50 better than 100 or 30
'''
#--------------------------------
'''
for root, dirs, files in os.walk(path_ards):
for name in files:
if name.endswith((".csv")):
for filename in os.listdir(path_ards):
df = pd.read_csv(path_ards+filename)
temp = []
flag = 0
print 'Patient ', filename, ' , number of breaths', len(df[keywords[0]])
for i in range(0, len(keywords)):
a = df[keywords[i]] #column
amean = np.sum(a)#/len(a)
data_person.append(a)
if amean == 0:
print filename#, keywords[i]
flag = 1
break
#print len(temp)
temp = np.array(data_person, dtype = keywords)
if flag == 0:
data_raw.append(temp)
def mypca(x):
result = la.eigh(np.dot(x, np.transpose(x)))
plt.plot(result[0])
plt.ylabel('Log of Residual History')
plt.show()
z = np.dot(np.dot(np.transpose(result[1]),result[1]), x)
zcumu = np.dot(result[0], z)
plt.plot(zcumu)
plt.ylabel('Log of Residual History')
plt.show()
print zcumu
return zcumu
'''
|
mit
|
JosmanPS/scikit-learn
|
sklearn/tree/export.py
|
53
|
15772
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
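# For example, _color_brew(3) yields three RGB triples with hues spaced 120
# degrees apart (roughly orange, green and violet) at fixed saturation/value.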
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _tree.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
|
bsd-3-clause
|
jwlawson/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
|
10
|
12872
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
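  # Features and labels are concatenated with themselves below, so the eval
  # input provides 2 * n_examples rows.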
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
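    # Two epochs of the queue-backed input yield twice as many predictions.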
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
ChristosChristofidis/h2o-3
|
h2o-py/h2o/frame.py
|
1
|
47685
|
# -*- coding: utf-8 -*-
# numpy is intentionally not imported (to avoid the dependency on Windows)
import collections, csv, itertools, os, re, tempfile, uuid, urllib2, sys, urllib,imp
from expr import h2o,ExprNode
import gc
class H2OFrame:
# Magical count-of-5: (get 2 more when looking at it in debug mode)
# 2 for _do_it frame, 2 for _do_it local dictionary list, 1 for parent
MAGIC_REF_COUNT = 5 if sys.gettrace() is None else 7 # M = debug ? 7 : 5
def __init__(self, python_obj=None, file_path=None, raw_id=None, expr=None):
"""
Create a new H2OFrame object by passing a file path or a list of H2OVecs.
If `remote_fname` is not None, then a REST call will be made to import the
data specified at the location `remote_fname`. This path is relative to the
H2O cluster, NOT the local Python process
If `python_obj` is not None, then an attempt to upload the python object to H2O
will be made. A valid python object has type `list`, or `dict`.
For more information on the structure of the input for the various native python
data types ("native" meaning non-H2O), please see the general documentation for
this object.
:param python_obj: A "native" python object - list, dict, tuple.
:param remote_fname: A remote path to a data source. Data is cluster-local.
:param vecs: A list of H2OVec objects.
:param text_key: A raw key resulting from an upload_file.
:return: An instance of an H2OFrame object.
"""
self._id = _py_tmp_key() # gets overwritten if a parse happens
self._keep = False
self._nrows = None
self._ncols = None
self._col_names = None
self._computed = False
self._ast = None
if expr is not None: self._ast = expr
elif python_obj is not None: self._upload_python_object(python_obj)
elif file_path is not None: self._import_parse(file_path)
elif raw_id is not None: self._handle_text_key(raw_id)
else: pass
@staticmethod
def get_frame(frame_id):
res = h2o.H2OConnection.get_json("Frames/"+urllib.quote(frame_id))["frames"][0]
fr = H2OFrame()
fr._nrows = res["rows"]
fr._ncols = res["total_column_count"]
fr._id = res["frame_id"]["name"]
fr._computed = True
fr._keep = True
fr._col_names = [c["label"] for c in res["columns"]]
return fr
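  # e.g. fr = H2OFrame.get_frame("my_frame_id")  # attaches to an existing
  # cluster-side frame by id (the id here is illustrative).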
def __str__(self): return self._id
def _import_parse(self,file_path):
rawkey = h2o.import_file(file_path)
setup = h2o.parse_setup(rawkey)
parse = h2o.parse(setup, _py_tmp_key()) # create a new key
self._id = parse["job"]["dest"]["name"]
self._computed=True
self._nrows = int(H2OFrame(expr=ExprNode("nrow", self))._scalar())
self._ncols = parse["number_columns"]
self._col_names = parse['column_names'] if parse["column_names"] else ["C" + str(x) for x in range(1,self._ncols+1)]
self._keep = True
thousands_sep = h2o.H2ODisplay.THOUSANDS
if isinstance(file_path, str): print "Imported {}. Parsed {} rows and {} cols".format(file_path,thousands_sep.format(self._nrows), thousands_sep.format(self._ncols))
else: h2o.H2ODisplay([["File"+str(i+1),f] for i,f in enumerate(file_path)],None, "Parsed {} rows and {} cols".format(thousands_sep.format(self._nrows), thousands_sep.format(self._ncols)))
def _upload_python_object(self, python_obj):
"""
Properly handle native python data types. For a discussion of the rules and
permissible data types please refer to the main documentation for H2OFrame.
:param python_obj: A tuple, list, dict, collections.OrderedDict
:return: None
"""
# [] and () cases -- folded together since H2OFrame is mutable
if isinstance(python_obj, (list, tuple)): header, data_to_write = _handle_python_lists(python_obj)
# {} and collections.OrderedDict cases
elif isinstance(python_obj, (dict, collections.OrderedDict)): header, data_to_write = _handle_python_dicts(python_obj)
# handle a numpy.ndarray
# elif isinstance(python_obj, numpy.ndarray):
#
# header, data_to_write = H2OFrame._handle_numpy_array(python_obj)
else: raise ValueError("`python_obj` must be a tuple, list, dict, collections.OrderedDict. Got: " + str(type(python_obj)))
if header is None or data_to_write is None: raise ValueError("No data to write")
#
## write python data to file and upload
#
# create a temporary file that will be written to
tmp_handle,tmp_path = tempfile.mkstemp(suffix=".csv")
tmp_file = os.fdopen(tmp_handle,'wb')
# create a new csv writer object thingy
csv_writer = csv.DictWriter(tmp_file, fieldnames=header, restval=None, dialect="excel", extrasaction="ignore", delimiter=",")
csv_writer.writeheader() # write the header
csv_writer.writerows(data_to_write) # write the data
tmp_file.close() # close the streams
self._upload_raw_data(tmp_path) # actually upload the data to H2O
os.remove(tmp_path) # delete the tmp file
def _handle_text_key(self, text_key, check_header=None):
"""
Handle the result of upload_file.
:param text_key: A key pointing to the raw text to be parsed.
:param check_header: Optional header-detection setting forwarded to the parse setup.
:return: None. Part of the H2OFrame constructor.
"""
# perform the parse setup
setup = h2o.parse_setup(text_key)
if check_header is not None: setup["check_header"] = check_header
parse = h2o.parse(setup, _py_tmp_key())
self._computed=True
self._id = parse["destination_frame"]["name"]
self._ncols = parse["number_columns"]
self._col_names = cols = parse['column_names'] if parse["column_names"] else ["C" + str(x) for x in range(1,self._ncols+1)]
self._nrows = int(H2OFrame(expr=ExprNode("nrow", self))._scalar())
self._keep = True
thousands_sep = h2o.H2ODisplay.THOUSANDS
print "Uploaded {} into cluster with {} rows and {} cols".format(text_key, thousands_sep.format(self._nrows), thousands_sep.format(len(cols)))
def _upload_raw_data(self, tmp_file_path):
fui = {"file": os.path.abspath(tmp_file_path)} # file upload info is the normalized path to a local file
dest_key = _py_tmp_key() # create a random name for the data
h2o.H2OConnection.post_json("PostFile", fui, destination_frame=dest_key) # do the POST -- blocking, and "fast" (does not real data upload)
self._handle_text_key(dest_key, 1) # actually parse the data and setup self._vecs
def __iter__(self):
"""
Allows for list comprehensions over an H2OFrame
:return: An iterator over the H2OFrame
"""
self._eager()
ncol = self._ncols
return (self[i] for i in range(ncol))
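# Sketch of per-column iteration (hypothetical frame `fr`): each item yielded by
# __iter__ is a single-column H2OFrame, so list comprehensions work column-wise:
#   >>> col_means = [col.mean() for col in fr]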
def logical_negation(self): return H2OFrame(expr=ExprNode("not", self))
# ops
def __add__ (self, i): return H2OFrame(expr=ExprNode("+", self,i))
def __sub__ (self, i): return H2OFrame(expr=ExprNode("-", self,i))
def __mul__ (self, i): return H2OFrame(expr=ExprNode("*", self,i))
def __div__ (self, i): return H2OFrame(expr=ExprNode("/", self,i))
def __floordiv__(self, i): return H2OFrame(expr=ExprNode("intDiv",self,i))
def __mod__ (self, i): return H2OFrame(expr=ExprNode("mod", self,i))
def __or__ (self, i): return H2OFrame(expr=ExprNode("|", self,i))
def __and__ (self, i): return H2OFrame(expr=ExprNode("&", self,i))
def __ge__ (self, i): return H2OFrame(expr=ExprNode(">=", self,i))
def __gt__ (self, i): return H2OFrame(expr=ExprNode(">", self,i))
def __le__ (self, i): return H2OFrame(expr=ExprNode("<=", self,i))
def __lt__ (self, i): return H2OFrame(expr=ExprNode("<", self,i))
def __eq__ (self, i): return H2OFrame(expr=ExprNode("==", self,i))
def __ne__ (self, i): return H2OFrame(expr=ExprNode("N", self,i))
def __pow__ (self, i): return H2OFrame(expr=ExprNode("^", self,i))
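# Sketch of the lazy operators above (hypothetical frame `fr`): each operator builds
# an ExprNode AST rather than computing immediately; evaluation is deferred until a
# result is actually needed (e.g. via _frame() or _scalar()):
#   >>> mask = (fr[0] > 1) & (fr[1] <= 5)   # still an unevaluated expression
#   >>> subset = fr[mask, :]                # rows where the mask is true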
# rops
def __rmod__(self, i): return H2OFrame(expr=ExprNode("mod",i,self))
def __radd__(self, i): return self.__add__(i)
def __rsub__(self, i): return H2OFrame(expr=ExprNode("-",i, self))
def __rand__(self, i): return self.__and__(i)
def __ror__ (self, i): return self.__or__ (i)
def __rdiv__(self, i): return H2OFrame(expr=ExprNode("/",i, self))
def __rfloordiv__(self, i): return H2OFrame(expr=ExprNode("intDiv",i,self))
def __rmul__(self, i): return self.__mul__(i)
def __rpow__(self, i): return H2OFrame(expr=ExprNode("^",i, self))
# unops
def __abs__ (self): return H2OFrame(expr=ExprNode("abs",self))
def __contains__(self, i): return all([(t==self).any() for t in i]) if _is_list(i) else (i==self).any()
def mult(self, matrix):
"""
Perform matrix multiplication.
:param matrix: The right-hand matrix; the result is self multiplied by `matrix`.
:return: The multiplied matrices.
"""
return H2OFrame(expr=ExprNode("x", self, matrix))
def cos(self) : return H2OFrame(expr=ExprNode("cos", self))
def sin(self) : return H2OFrame(expr=ExprNode("sin", self))
def tan(self) : return H2OFrame(expr=ExprNode("tan", self))
def acos(self) : return H2OFrame(expr=ExprNode("acos", self))
def asin(self) : return H2OFrame(expr=ExprNode("asin", self))
def atan(self) : return H2OFrame(expr=ExprNode("atan", self))
def cosh(self) : return H2OFrame(expr=ExprNode("cosh", self))
def sinh(self) : return H2OFrame(expr=ExprNode("sinh", self))
def tanh(self) : return H2OFrame(expr=ExprNode("tanh", self))
def acosh(self) : return H2OFrame(expr=ExprNode("acosh", self))
def asinh(self) : return H2OFrame(expr=ExprNode("asinh", self))
def atanh(self) : return H2OFrame(expr=ExprNode("atanh", self))
def cospi(self) : return H2OFrame(expr=ExprNode("cospi", self))
def sinpi(self) : return H2OFrame(expr=ExprNode("sinpi", self))
def tanpi(self) : return H2OFrame(expr=ExprNode("tanpi", self))
def abs(self) : return H2OFrame(expr=ExprNode("abs", self))
def sign(self) : return H2OFrame(expr=ExprNode("sign", self))
def sqrt(self) : return H2OFrame(expr=ExprNode("sqrt", self))
def trunc(self) : return H2OFrame(expr=ExprNode("trunc", self))
def ceil(self) : return H2OFrame(expr=ExprNode("ceiling", self))
def floor(self) : return H2OFrame(expr=ExprNode("floor", self))
def log(self) : return H2OFrame(expr=ExprNode("log", self))
def log10(self) : return H2OFrame(expr=ExprNode("log10", self))
def log1p(self) : return H2OFrame(expr=ExprNode("log1p", self))
def log2(self) : return H2OFrame(expr=ExprNode("log2", self))
def exp(self) : return H2OFrame(expr=ExprNode("exp", self))
def expm1(self) : return H2OFrame(expr=ExprNode("expm1", self))
def gamma(self) : return H2OFrame(expr=ExprNode("gamma", self))
def lgamma(self) : return H2OFrame(expr=ExprNode("lgamma", self))
def digamma(self) : return H2OFrame(expr=ExprNode("digamma", self))
def trigamma(self): return H2OFrame(expr=ExprNode("trigamma", self))
@staticmethod
def mktime(year=1970,month=0,day=0,hour=0,minute=0,second=0,msec=0):
"""
All units are zero-based (including months and days). A missing year defaults to 1970.
:return: Returns msec since the Epoch.
"""
return H2OFrame(expr=ExprNode("mktime", year,month,day,hour,minute,second,msec))._frame()
def col_names(self):
"""
Retrieve the column names (one name per H2OVec) for this H2OFrame.
:return: A character list[] of column names.
"""
self._eager()
return self._col_names
def sd(self, na_rm=False):
"""
:return: Standard deviation of the H2OVec elements.
"""
return H2OFrame(expr=ExprNode("sd", self,na_rm))._scalar()
def names(self):
"""
Retrieve the column names (one name per H2OVec) for this H2OFrame.
:return: A character list[] of column names.
"""
self._eager()
return self.col_names()
def nrow(self):
"""
Get the number of rows in this H2OFrame.
:return: The number of rows in this dataset.
"""
self._eager()
return self._nrows
def ncol(self):
"""
Get the number of columns in this H2OFrame.
:return: The number of columns in this H2OFrame.
"""
self._eager()
return self._ncols
def filterNACols(self, frac=0.2):
"""
Filter columns whose proportion of NAs is >= frac.
:param frac: Fraction of NAs in the column.
:return: A list of column indices.
"""
return H2OFrame(expr=ExprNode("filterNACols", self, frac))._frame()
def dim(self):
"""
Get the number of rows and columns in the H2OFrame.
:return: The number of rows and columns in the H2OFrame as a list [rows, cols].
"""
return [self.nrow(), self.ncol()]
def unique(self):
"""
Extract the unique values in the column.
:return: A new H2OFrame of just the unique values in the column.
"""
return H2OFrame(expr=ExprNode("unique", self))._frame()
def show(self): self.head(rows=10,cols=sys.maxint,show=True) # all columns
def head(self, rows=10, cols=200, show=False, **kwargs):
"""
Analogous to R's `head` call on a data.frame. Display a digestible chunk of the H2OFrame starting from the beginning.
:param rows: Number of rows to display.
:param cols: Number of columns to display.
:param show: Display the output.
:param kwargs: Extra arguments passed from other methods.
:return: None
"""
self._eager()
nrows = min(self.nrow(), rows)
ncols = min(self.ncol(), cols)
colnames = self.names()[0:ncols]
head = self[0:nrows,0:ncols]
res = head.as_data_frame(False)[1:]
if show:
print "First {} rows and first {} columns: ".format(nrows, ncols)
h2o.H2ODisplay(res,colnames)
return head
def tail(self, rows=10, cols=200, show=False, **kwargs):
"""
Analogous to R's `tail` call on a data.frame. Display a digestible chunk of the H2OFrame starting from the end.
:param rows: Number of rows to display.
:param cols: Number of columns to display.
:param kwargs: Extra arguments passed from other methods.
:return: None
"""
self._eager()
nrows = min(self.nrow(), rows)
ncols = min(self.ncol(), cols)
start_idx = max(self.nrow()-nrows,0)
tail = self[start_idx:(start_idx+nrows),:]
res = tail.as_data_frame(False)
colnames = res.pop(0)
if show:
print "Last {} rows and first {} columns: ".format(nrows,ncols)
h2o.H2ODisplay(res,colnames)
return tail
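# head/tail sketch (hypothetical frame `fr`): both return an H2OFrame slice and only
# print when show=True:
#   >>> top = fr.head(rows=5, show=True)   # first 5 rows
#   >>> bottom = fr.tail(rows=5)           # last 5 rows, no printing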
def levels(self, col=None):
"""
Get the factor levels for this frame and the specified column index.
:param col: A column index in this H2OFrame.
:return: a list of strings that are the factor levels for the column.
"""
if self.ncol()==1 or col is None:
lol=h2o.as_list(H2OFrame(expr=ExprNode("levels", self))._frame(), False)[1:]
levels=[level for l in lol for level in l] if self.ncol()==1 else lol
elif col is not None:
lol=h2o.as_list(H2OFrame(expr=ExprNode("levels", ExprNode("[", self, None,col)))._frame(),False)[1:]
levels=[level for l in lol for level in l]
else: levels=None
return None if levels is None or levels==[] else levels
def nlevels(self, col=None):
"""
Get the number of factor levels for this frame and the specified column index.
:param col: A column index in this H2OFrame.
:return: an integer.
"""
nlevels = self.levels(col=col)
return len(nlevels) if nlevels else 0
def setLevel(self, level):
"""
A method to set all column values to one of the levels.
:param level: The level to assign to every entry in the column (a string)
:return: An H2OFrame with all entries set to the desired level
"""
return H2OFrame(expr=ExprNode("setLevel", self, level))._frame()
def setLevels(self, levels):
"""
Works on a single categorical vector. New domains must be aligned with the old domains. This call has SIDE
EFFECTS and mutates the column in place (does not make a copy).
:param levels: A list of strings specifying the new levels. The number of new levels must match the number of
old levels.
:return: None
"""
h2o.rapids(ExprNode("setDomain", self, levels)._eager())
self._update()
return self
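# setLevels sketch (hypothetical level names): the column must already be categorical
# and the new domain must have as many levels as the old one; the column is mutated
# in place:
#   >>> fr["C1"].setLevels(["low", "medium", "high"])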
def setNames(self,names):
"""
Change the column names to `names`.
:param names: A list of strings equal to the number of columns in the H2OFrame.
:return: None. Rename the column names in this H2OFrame.
"""
h2o.rapids(ExprNode("colnames=", self, range(self.ncol()), names)._eager())
self._update()
return self
def setName(self,col=None,name=None):
"""
Set the name of the column at the specified index.
:param col: Index of the column whose name is to be set.
:param name: The new name of the column to set
:return: the input frame
"""
if not isinstance(col, int) and self.ncol() > 1: raise ValueError("`col` must be an index. Got: " + str(col))
if self.ncol() == 1: col = 0
h2o.rapids(ExprNode("colnames=", self, col, name)._eager())
self._update()
return self
def describe(self):
"""
Generate an in-depth description of this H2OFrame.
The description is a tabular print of the type, min, max, sigma, number of zeros,
and number of missing elements for each H2OVec in this H2OFrame.
:return: None (print to stdout)
"""
self._eager()
thousands_sep = h2o.H2ODisplay.THOUSANDS
print "Rows:", thousands_sep.format(self._nrows), "Cols:", thousands_sep.format(self._ncols)
chunk_dist_sum = h2o.frame(self._id)["frames"][0]
dist_summary = chunk_dist_sum["distribution_summary"]
chunk_summary = chunk_dist_sum["chunk_summary"]
chunk_summary.show()
dist_summary.show()
self.summary()
def summary(self):
"""
Generate summary of the frame on a per-Vec basis.
:return: None
"""
self._eager()
fr_sum = h2o.H2OConnection.get_json("Frames/" + urllib.quote(self._id) + "/summary")["frames"][0]
type = ["type"]
mins = ["mins"]
mean = ["mean"]
maxs = ["maxs"]
sigma= ["sigma"]
zeros= ["zero_count"]
miss = ["missing_count"]
for v in fr_sum["columns"]:
type.append(v["type"])
mins.append(v["mins"][0] if v is not None else v["mins"])
mean.append(v["mean"])
maxs.append(v["maxs"][0] if v is not None else v["maxs"])
sigma.append(v["sigma"])
zeros.append(v["zero_count"])
miss.append(v["missing_count"])
table = [type,mins,maxs,mean,sigma,zeros,miss]
headers = self._col_names
h2o.H2ODisplay(table, [""] + headers, "Column-by-Column Summary")
def __repr__(self):
if sys.gettrace() is None:
self.show()
return ""
def as_date(self,format):
"""
Return the column with all elements converted to millis since the epoch.
:param format: The date time format string
:return: H2OFrame
"""
return H2OFrame(expr=ExprNode("as.Date",self,format))
def cumsum(self):
"""
:return: The cumulative sum over the column.
"""
return H2OFrame(expr=ExprNode("cumsum",self))
def cumprod(self):
"""
:return: The cumulative product over the column.
"""
return H2OFrame(expr=ExprNode("cumprod",self))
def cummin(self):
"""
:return: The cumulative min over the column.
"""
return H2OFrame(expr=ExprNode("cummin",self))
def cummax(self):
"""
:return: The cumulative max over the column.
"""
return H2OFrame(expr=ExprNode("cummax",self))
def prod(self,na_rm=False):
"""
:return: The product of the column.
"""
return H2OFrame(expr=ExprNode("prod",self,na_rm))._scalar()
def any(self,na_rm=False):
"""
:return: True if any element is True in the column.
"""
return H2OFrame(expr=ExprNode("any",self,na_rm))._scalar()
def all(self):
"""
:return: True if every element is True in the column.
"""
return H2OFrame(expr=ExprNode("all",self,False))._scalar()
def isnumeric(self):
"""
:return: True if the column is numeric, otherwise return False
"""
return H2OFrame(expr=ExprNode("is.numeric",self))._scalar()
def isstring(self):
"""
:return: True if the column is a string column, otherwise False (same as ischaracter)
"""
return H2OFrame(expr=ExprNode("is.character",self))._scalar()
def ischaracter(self):
"""
:return: True if the column is a character column, otherwise False (same as isstring)
"""
return self.isstring()
def remove_vecs(self, cols):
"""
:param cols: Drop these columns.
:return: A frame with the columns dropped.
"""
self._eager()
is_char = all([isinstance(i,(unicode,str)) for i in cols])
if is_char:
cols = [self._find_idx(col) for col in cols]
cols = sorted(cols)
return H2OFrame(expr=ExprNode("removeVecs",self,cols))._frame()
def structure(self):
"""
Similar to R's str method: Compactly Display the Structure of this H2OFrame instance.
:return: None
"""
df = self.head().as_data_frame(use_pandas=False)
nr = self.nrow()
nc = len(df[0])
cn = df.pop(0)
width = max([len(c) for c in cn])
isfactor = [c.isfactor() for c in self]
numlevels = [self.nlevels(i) for i in range(nc)]
lvls = self.levels()
print "H2OFrame '{}': \t {} obs. of {} variables(s)".format(self._id,nr,nc)
for i in range(nc):
print "$ {} {}: ".format(cn[i], ' '*(width-max(0,len(cn[i])))),
if isfactor[i]:
nl = numlevels[i]
print "Factor w/ {} level(s) {},..: ".format(nl, '"' + '","'.join(zip(*lvls)[i]) + '"'),
print " ".join(it[0] for it in h2o.as_list(self[:10,i].match(list(zip(*lvls)[i])), False)[1:]),
print "..."
else:
print "num {} ...".format(" ".join(it[0] for it in h2o.as_list(self[:10,i], False)[1:]))
def as_data_frame(self, use_pandas=True):
"""
Obtain the dataset as a python-local object (pandas frame if possible, list otherwise)
:param use_pandas: A flag specifying whether or not to attempt to coerce to Pandas.
:return: A local python object containing this H2OFrame instance's data.
"""
self._eager()
url = 'http://' + h2o.H2OConnection.ip() + ':' + str(h2o.H2OConnection.port()) + "/3/DownloadDataset?frame_id=" + urllib.quote(self._id) + "&hex_string=false"
response = urllib2.urlopen(url)
if h2o.can_use_pandas() and use_pandas:
import pandas
return pandas.read_csv(response, low_memory=False)
else:
cr = csv.reader(response)
rows = []
for row in cr: rows.append([''] if row == [] else row)
return rows
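# as_data_frame sketch (hypothetical frame `fr`): the frame is downloaded as CSV over
# HTTP, so this is only appropriate for data that fits in client memory:
#   >>> pdf = fr.as_data_frame()                   # pandas.DataFrame when pandas is available
#   >>> rows = fr.as_data_frame(use_pandas=False)  # header row followed by data rows, as lists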
# Find a named H2OVec and return the zero-based index for it. Error if the name is missing
def _find_idx(self,name):
for i,v in enumerate(self._col_names):
if name == v: return i
raise ValueError("Name " + name + " not in Frame")
def index(self,name):
self._eager()
return self._find_idx(name)
def __getitem__(self, item):
"""
Frame slicing.
Supports R-like row and column slicing.
Examples:
fr[0:5,:] # first 5 rows, all columns
fr[fr[0] > 1, :] # all rows greater than 1 in the first column, all columns
fr[[1,5,6]] # columns 1, 5, and 6
fr[0:50, [1,2,3]] # first 50 rows, columns 1,2, and 3
:param item: A tuple, a list, a string, or an int.
If a tuple, then this indicates both row and column selection. The tuple
must be exactly length 2.
If a list, then this indicates column selection.
If an int, then this indicates a single column to be retrieved at that index.
If a string, then slice on the column with this name.
:return: An H2OFrame.
"""
if isinstance(item, (int,str,list,slice)): return H2OFrame(expr=ExprNode("[", self, None, item)) # just columns
elif isinstance(item, H2OFrame): return H2OFrame(expr=ExprNode("[",self,item,None))
elif isinstance(item, tuple):
rows = item[0]
cols = item[1]
allrows = False
allcols = False
if isinstance(cols, slice):
allcols = all([a is None for a in [cols.start,cols.step,cols.stop]])
if isinstance(rows, slice):
allrows = all([a is None for a in [rows.start,rows.step,rows.stop]])
if allrows and allcols: return self # fr[:,:] -> all rows and columns.. return self
if allrows: return H2OFrame(expr=ExprNode("[",self,None,item[1])) # fr[:,cols] -> really just a column slice
if allcols: return H2OFrame(expr=ExprNode("[",self,item[0],None)) # fr[rows,:] -> really just a row slices
if isinstance(item[0], (str,unicode,int)) and isinstance(item[1],(str,unicode,int)):
return H2OFrame(expr=ExprNode("[", self, item[0], item[1]))._scalar()
return H2OFrame(expr=ExprNode("[",self,item[0],item[1]))
# return H2OFrame(expr=ExprNode("[", ExprNode("[",self,None,item[1]),item[0],None))._scalar()
# return H2OFrame(expr=ExprNode("[", ExprNode("[", self, None, item[1]), item[0], None))
def __setitem__(self, b, c):
"""
Replace a column in an H2OFrame.
:param b: A 0-based index or a column name.
:param c: The vector that 'b' is replaced with.
:return: Returns this H2OFrame.
"""
update_index=-1
if isinstance(b, (str,unicode)): update_index=self.col_names().index(b) if b in self.col_names() else self._ncols
elif isinstance(b, int): update_index=b
lhs = ExprNode("[", self, b, None) if isinstance(b,H2OFrame) else ExprNode("[", self, None, update_index)
rhs = c._frame() if isinstance(c,H2OFrame) else c
col_name = b if (update_index==self._ncols and isinstance(b, (str, unicode))) else ( c._col_names[0] if isinstance(c, H2OFrame) else "" )
sb = ExprNode(",", ExprNode("=",lhs,rhs), ExprNode("colnames=",self,update_index,col_name))._eager() if update_index >= self.ncol() else ExprNode("=",lhs,rhs)._eager()
h2o.rapids(ExprNode._collapse_sb(sb))
self._update()
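# __setitem__ sketch (hypothetical column names): assigning to an existing name
# replaces that column, assigning to a new name appends one:
#   >>> fr["ratio"] = fr["C1"] / fr["C2"]   # appended if "ratio" is not yet a column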
def __int__(self): return int(self._scalar())
def __float__(self): return self._scalar()
def __del__(self):
if not self._keep and self._computed: h2o.remove(self)
def keep(self): self._keep = True
def drop(self, i):
"""
Returns a Frame with the column at index i dropped.
:param i: Column to drop
:return: Returns an H2OFrame
"""
if isinstance(i, (unicode,str)): i = self._find_idx(i)
return H2OFrame(expr=ExprNode("[", self, None,-(i+1)))._frame()
def __len__(self):
"""
:return: Number of columns in this H2OFrame
"""
return self.ncol()
def quantile(self, prob=None, combine_method="interpolate"):
"""
Compute quantiles over a given H2OFrame.
:param prob: A list of probabilities, default is [0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]. You may provide any sequence of any length.
:param combine_method: For even samples, how to combine quantiles. Should be one of ["interpolate", "average", "low", "hi"]
:return: an H2OFrame containing the quantiles and probabilities.
"""
if len(self) == 0: return self
if not prob: prob=[0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]
return H2OFrame(expr=ExprNode("quantile",self,prob,combine_method))._frame()
def cbind(self,data):
"""
:param data: H2OFrame or H2OVec to cbind to self.
:return: A new H2OFrame with the columns of `data` bound to the right of self.
"""
return H2OFrame(expr=ExprNode("cbind", False, self, data))
def rbind(self, data):
"""
Combine H2O Datasets by Rows.
Takes a sequence of H2O data sets and combines them by rows.
:param data: an H2OFrame
:return: self, with data appended (row-wise)
"""
if not isinstance(data, H2OFrame): raise ValueError("`data` must be an H2OFrame, but got {0}".format(type(data)))
return H2OFrame(expr=ExprNode("rbind", self, data))
def split_frame(self, ratios=[0.75], destination_frames=""):
"""
Split a frame into distinct subsets of size determined by the given ratios.
The number of subsets is always 1 more than the number of ratios given.
:param ratios: The fraction of rows for each split; the final split receives the remaining rows.
:param destination_frames: Names of the split frames.
:return: A list of H2OFrames.
"""
j = h2o.H2OConnection.post_json("SplitFrame", dataset=self._id, ratios=ratios, destination_frames=destination_frames)
h2o.H2OJob(j, "Split Frame").poll()
return [h2o.get_frame(i["name"]) for i in j["destination_frames"]]
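# split_frame sketch: one ratio produces two frames, the second receiving the remaining rows:
#   >>> train, test = fr.split_frame(ratios=[0.75])   # roughly 75% / 25% of the rows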
# ddply in h2o
def ddply(self,cols,fun):
"""
:param cols: Column names used to control grouping
:param fun: Function to execute on each group. Right now limited to textual Rapids expression
:return: New frame with 1 row per-group, of results from 'fun'
"""
return H2OFrame(expr=ExprNode("ddply", self, cols, fun))._frame()
def group_by(self,cols,aggregates,order_by=None):
"""
GroupBy
:param cols: The columns to group on.
:param aggregates: A dictionary of aggregates having the following shape: \
{"colname":[aggregate, column, naMethod]}\
e.g.: {"bikes":["count", 0, "all"]}\
The naMethod is one of "all", "ignore", or "rm", which specifies how to handle
NAs that appear in columns that are being aggregated.
"all" - include NAs
"rm" - exclude NAs
"ignore" - ignore NAs in aggregates, but count them (e.g. in denominators for mean, var, sd, etc.)
:param order_by: A list of column names or indices on which to order the results.
:return: The group by frame.
"""
aggs = []
for k in aggregates: aggs += (aggregates[k] + [str(k)])
aggs = h2o.ExprNode("agg", *aggs)
return H2OFrame(expr=ExprNode("GB", self,cols,aggs,order_by))._frame()
def impute(self,column,method="mean",combine_method="interpolate",by=None,inplace=True):
"""
Impute a column in this H2OFrame.
:param column: The column to impute
:param method: How to compute the imputation value.
:param combine_method: For even samples and method="median", how to combine quantiles.
:param by: Columns to group-by for computing imputation value per groups of columns.
:param inplace: Impute inplace?
:return: the imputed frame.
"""
if isinstance(column, (str, unicode)): column = self._find_idx(column)
if isinstance(by, (str, unicode)): by = self._find_idx(by)
return H2OFrame(expr=ExprNode("h2o.impute", self, column, method, combine_method, by, inplace))._frame()
def merge(self, other, allLeft=False, allRite=False):
"""
Merge two datasets based on common column names
:param other: Other dataset to merge. Must have at least one column in common with self, and all columns in common are used as the merge key. If you want to use only a subset of the columns in common, rename the other columns so the columns are unique in the merged result.
:param allLeft: If true, include all rows from the left/self frame
:param allRite: If true, include all rows from the right/other frame
:return: Original self frame enhanced with merged columns and rows
"""
return H2OFrame(expr=ExprNode("merge", self, other, allLeft, allRite))._frame()
def insert_missing_values(self, fraction=0.1, seed=None):
"""
Insert missing values into an H2OFrame.
*This is primarily used for testing*. Randomly replaces a user-specified fraction of entries in an H2O dataset with
missing values.
WARNING: This will modify the original dataset. Unless this is intended, this function should only be called on a
subset of the original.
:param fraction: A number between 0 and 1 indicating the fraction of entries to replace with missing.
:param seed: A seed used to select which entries to replace with missing values. If None (the default), H2O
automatically generates a seed.
:return: H2OFrame with missing values inserted
"""
self._eager()
kwargs = {}
kwargs['dataset'] = self._id
kwargs['fraction'] = fraction
if seed is not None: kwargs['seed'] = seed
job = {}
job['job'] = h2o.H2OConnection.post_json("MissingInserter", **kwargs)
h2o.H2OJob(job, job_type=("Insert Missing Values")).poll()
return self
# generic reducers (min, max, sum, var)
def min(self):
"""
:return: The minimum value of all frame entries
"""
return H2OFrame(expr=ExprNode("min", self))._scalar()
def max(self):
"""
:return: The maximum value of all frame entries
"""
return H2OFrame(expr=ExprNode("max", self))._scalar()
def sum(self):
"""
:return: The sum of all frame entries
"""
return H2OFrame(expr=ExprNode("sum", self))._scalar()
def mean(self,na_rm=False):
"""
:param na_rm: True or False to remove NAs from computation.
:return: The mean of the column.
"""
return H2OFrame(expr=ExprNode("mean", self, 0, na_rm))._scalar()
def median(self):
"""
:return: Median of this column.
"""
return H2OFrame(expr=ExprNode("median", self))._scalar()
def var(self,y=None,na_rm=False,use="everything"):
"""
:param na_rm: True or False to remove NAs from computation.
:param use: One of "everything", "complete.obs", or "all.obs".
:return: The covariance matrix of the columns in this H2OFrame.
"""
return H2OFrame(expr=ExprNode("var", self,y,na_rm,use))
def asfactor(self):
"""
:return: A lazy Expr representing this vec converted to a factor
"""
return H2OFrame(expr=ExprNode("as.factor",self))
def isfactor(self):
"""
:return: A lazy Expr representing the truth of whether or not this vec is a factor.
"""
return H2OFrame(expr=ExprNode("is.factor", self))._scalar()
def anyfactor(self):
"""
:return: Whether or not the frame has any factor columns
"""
return H2OFrame(expr=ExprNode("any.factor", self))._scalar()
def transpose(self):
"""
:return: The transpose of the H2OFrame.
"""
return H2OFrame(expr=ExprNode("t", self))
def strsplit(self, pattern):
"""
Split the strings in the target column on the given pattern
:return: H2OFrame
"""
return H2OFrame(expr=ExprNode("strsplit", self, pattern))
def trim(self):
"""
Trim the edge-spaces in a column of strings (only operates on frame with one column)
:return: H2OFrame
"""
return H2OFrame(expr=ExprNode("trim", self))
def table(self, data2=None):
"""
:return: a frame of the counts at each combination of factor levels
"""
return H2OFrame(expr=ExprNode("table",self,data2))
def hist(self, breaks="Sturges", plot=True, **kwargs):
"""
Compute a histogram over a numeric column. If breaks=="FD", the MAD is used over the IQR in computing bin width.
:param breaks: breaks Can be one of the following: A string: "Sturges", "Rice", "sqrt", "Doane", "FD", "Scott." A
single number for the number of breaks splitting the range of the vec into number of breaks bins of equal width. Or,
A vector of numbers giving the split points, e.g., c(-50,213.2123,9324834)
:param plot: A logical value indicating whether or not a plot should be generated (default is TRUE).
:return: if plot is True, then return None, else, an H2OFrame with these columns: breaks, counts, mids_true, mids,
and density
"""
frame = H2OFrame(expr=ExprNode("hist", self, breaks))._frame()
total = frame["counts"].sum()
densities = [(frame["counts"][i,:]/total)._scalar()*(1/(frame["breaks"][i,:]._scalar()-frame["breaks"][i-1,:]._scalar())) for i in range(1,frame["counts"].nrow())]
densities.insert(0,0)
densities_frame = H2OFrame(python_obj=[[d] for d in densities])
densities_frame.setNames(["density"])
frame = frame.cbind(densities_frame)
if plot:
try:
imp.find_module('matplotlib')
import matplotlib
if 'server' in kwargs.keys() and kwargs['server']: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print "matplotlib is required to make the histogram plot. Set `plot` to False, if a plot is not desired."
return
lower = float(frame["breaks"][0,:])
clist = h2o.as_list(frame["counts"], use_pandas=False)
clist.pop(0)
clist.pop(0)
mlist = h2o.as_list(frame["mids"], use_pandas=False)
mlist.pop(0)
mlist.pop(0)
counts = [float(c[0]) for c in clist]
counts.insert(0,0)
mids = [float(m[0]) for m in mlist]
mids.insert(0,lower)
plt.xlabel(self._col_names[0])
plt.ylabel('Frequency')
plt.title('Histogram of {0}'.format(self._col_names[0]))
plt.bar(mids, counts)
if not ('server' in kwargs.keys() and kwargs['server']): plt.show()
else: return frame
def sub(self, pattern, replacement, ignore_case=False):
"""
sub and gsub perform replacement of the first and all matches respectively.
Note: this mutates the frame.
:return: H2OFrame
"""
return H2OFrame(expr=ExprNode("sub",pattern,replacement,self,ignore_case))
def gsub(self, pattern, replacement, ignore_case=False):
"""
sub and gsub perform replacement of the first and all matches respectively.
Note: this mutates the frame.
:return: H2OFrame
"""
return H2OFrame(expr=ExprNode("gsub", pattern, replacement, self, ignore_case))
def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param factors: factors Factor columns (either indices or column names).
:param pairwise: Whether to create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all
factor will be made)
:param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
:param destination_frame: A string indicating the destination key. If empty, this will be auto-generated by H2O.
:return: H2OFrame
"""
return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors,
min_occurrence=min_occurrence, destination_frame=destination_frame)
def toupper(self):
"""
Translate characters from lower to upper case for a particular column
Note: this mutates the frame.
:return: H2OFrame
"""
return H2OFrame(expr=ExprNode("toupper", self))
def tolower(self):
"""
Translate characters from upper to lower case for a particular column
Note: this mutates the frame.
:return: H2OFrame
"""
return H2OFrame(expr=ExprNode("tolower", self))
def rep_len(self, length_out):
"""
Replicate the values of this H2OFrame in the H2O backend.
:param length_out: the number of columns of the resulting H2OFrame
:return: an H2OFrame
"""
return H2OFrame(expr=ExprNode("rep_len", self, length_out))
def scale(self, center=True, scale=True):
"""
Centers and/or scales the columns of the H2OFrame
:return: H2OFrame
:param center: either a ‘logical’ value or numeric list of length equal to the number of columns of the H2OFrame
:param scale: either a ‘logical’ value or numeric list of length equal to the number of columns of H2OFrame.
"""
return H2OFrame(expr=ExprNode("scale", self, center, scale))
def signif(self, digits=6):
"""
:return: The rounded values in the H2OFrame to the specified number of significant digits.
"""
return H2OFrame(expr=ExprNode("signif", self, digits))
def round(self, digits=0):
"""
:return: The rounded values in the H2OFrame to the specified number of decimal digits.
"""
return H2OFrame(expr=ExprNode("round", self, digits))
def asnumeric(self):
"""
:return: A frame with factor columns converted to numbers (numeric columns untouched).
"""
return H2OFrame(expr=ExprNode("as.numeric", self))
def ascharacter(self):
"""
:return: A lazy Expr representing this vec converted to characters
"""
return H2OFrame(expr=ExprNode("as.character", self))
def na_omit(self):
"""
:return: Removes rows with NAs
"""
return H2OFrame(expr=ExprNode("na.omit", self))._frame()
def isna(self):
"""
:return: Returns a new boolean H2OVec.
"""
return H2OFrame(expr=ExprNode("is.na", self))
def year(self):
"""
:return: Returns a new year column from a msec-since-Epoch column
"""
return H2OFrame(expr=ExprNode("year", self))
def month(self):
"""
:return: Returns a new month column from a msec-since-Epoch column
"""
return H2OFrame(expr=ExprNode("month", self))
def week(self):
"""
:return: Returns a new week column from a msec-since-Epoch column
"""
return H2OFrame(expr=ExprNode("week", self))
def day(self):
"""
:return: Returns a new day column from a msec-since-Epoch column
"""
return H2OFrame(expr=ExprNode("day", self))
def dayOfWeek(self):
"""
:return: Returns a new Day-of-Week column from a msec-since-Epoch column
"""
return H2OFrame(expr=ExprNode("dayOfWeek", self))
def hour(self):
"""
:return: Returns a new Hour-of-Day column from a msec-since-Epoch column
"""
return H2OFrame(expr=ExprNode("hour", self))
def runif(self, seed=None):
"""
:param seed: A random seed. If None, then one will be generated.
:return: A new H2OVec filled with doubles sampled uniformly from [0,1).
"""
return H2OFrame(expr=ExprNode("h2o.runif", self, -1 if seed is None else seed))
def match(self, table, nomatch=0):
"""
Makes a vector of the positions of (first) matches of its first argument in its second.
:return: bit H2OVec
"""
return H2OFrame(expr=ExprNode("match", self, table, nomatch, None))
def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
"""
Cut a numeric vector into factor "buckets". Similar to R's cut method.
:param breaks: The cut points in the numeric vector (must span the range of the col.)
:param labels: Factor labels, defaults to set notation of intervals defined by breaks.
:param include_lowest: By default, cuts are defined as (lo,hi]. If True, get [lo,hi].
:param right: Include the high value: (lo,hi]. If False, get (lo,hi).
:param dig_lab: Number of digits following the decimal point to consider.
:return: A factor column.
"""
return H2OFrame(expr=ExprNode("cut",self,breaks,labels,include_lowest,right,dig_lab))
# flow-coding result methods
def _scalar(self):
res = self.as_data_frame(False)
if res[1] == []: return float("nan")
res = res[1][0]
if res == "TRUE": return True
if res == "FALSE":return False
try: return float(res)
except: return res
def _frame(self): # force an eval on the frame and return it
self._eager()
return self
##### DO NOT ADD METHODS BELOW THIS LINE (pretty please) #####
def _eager(self, pytmp=True):
if not self._computed:
# top-level call to execute all subparts of self._ast
sb = self._ast._eager()
if pytmp:
h2o.rapids(ExprNode._collapse_sb(sb), self._id)
sb = ["%", self._id," "]
self._update() # fill out _nrows, _ncols, _col_names, _computed
return sb
def _do_it(self,sb):
# this method is only ever called from ExprNode._do_it
# it's the "long" way 'round the mutual recursion from ExprNode to H2OFrame
#
# Here's a diagram that illustrates the call order:
#
# H2OFrame: ExprNode:
# _eager ----------------> _eager
#
# ^^ ^^ ||
# || || \/
#
# _do_it <---------------- _do_it
#
# the "long" path:
# pending exprs in DAG with exterior refs must be saved (refs >= magic count)
#
if self._computed: sb += ['%',self._id+" "]
else: sb += self._eager(True) if (len(gc.get_referrers(self)) >= H2OFrame.MAGIC_REF_COUNT) else self._eager(False)
def _update(self):
res = h2o.frame(self._id)["frames"][0] # TODO: exclude here?
self._nrows = res["rows"]
self._ncols = len(res["columns"])
self._col_names = [c["label"] for c in res["columns"]]
self._computed=True
self._ast=None
#### DO NOT ADD METHODS HERE!!! ####
# private static methods
def _py_tmp_key(): return unicode("py" + str(uuid.uuid4()))
def _gen_header(cols): return ["C" + str(c) for c in range(1, cols + 1, 1)]
def _check_lists_of_lists(python_obj):
# all items in the list must be a list too
lol_all = all(isinstance(l, (tuple, list)) for l in python_obj)
# All items in the list must be a list!
if not lol_all:
raise ValueError("`python_obj` is a mixture of nested lists and other types.")
# in fact, we must have a list of flat lists!
for l in python_obj:
if any(isinstance(ll, (tuple, list)) for ll in l):
raise ValueError("`python_obj` is not a list of flat lists!")
def _handle_python_lists(python_obj):
cols = len(python_obj) # cols will be len(python_obj) if not a list of lists
lol = _is_list_of_lists(python_obj) # do we have a list of lists: [[...], ..., [...]] ?
if lol:
_check_lists_of_lists(python_obj) # must be a list of flat lists, raise ValueError if not
# have list of lists, each list is a row
# length of the longest list is the number of columns
cols = max([len(l) for l in python_obj])
# create the header
header = _gen_header(cols)
# shape up the data for csv.DictWriter
data_to_write = [dict(zip(header, row)) for row in python_obj] if lol else [dict(zip(header, python_obj))]
return header, data_to_write
def _is_list(l) : return isinstance(l, (tuple, list))
def _is_str_list(l): return isinstance(l, (tuple, list)) and all([isinstance(i,(str,unicode)) for i in l])
def _is_num_list(l): return isinstance(l, (tuple, list)) and all([isinstance(i,(float,int )) for i in l])
def _is_list_of_lists(o): return any(isinstance(l, (list, tuple)) for l in o)
def _handle_numpy_array(python_obj): return _handle_python_lists(python_obj=python_obj.tolist())
def _handle_pandas_data_frame(python_obj): return _handle_numpy_array(python_obj=python_obj.as_matrix())
def _handle_python_dicts(python_obj):
header = python_obj.keys()
is_valid = all([re.match(r'^[a-zA-Z_][a-zA-Z0-9_.]*$', col) for col in header]) # is this a valid header?
if not is_valid:
raise ValueError("Did not get a valid set of column names! Must match the regular expression: ^[a-zA-Z_][a-zA-Z0-9_.]*$ ")
for k in python_obj: # check that each value entry is a flat list/tuple
v = python_obj[k]
if isinstance(v, (tuple, list)): # if value is a tuple/list, then it must be flat
if _is_list_of_lists(v):
raise ValueError("Values in the dictionary must be flattened!")
rows = map(list, itertools.izip_longest(*python_obj.values()))
data_to_write = [dict(zip(header, row)) for row in rows]
return header, data_to_write
|
apache-2.0
|
Dih5/mcareader
|
mcareader.py
|
1
|
9128
|
"""A minimal python interface to read Amptek's mca files"""
import re
import sys
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from scipy.stats import linregress
from io import open # python 2 compatibility
__author__ = 'Dih5'
__version__ = "0.4.0"
def _str_to_array(s, sep=" "):
"""
Convert a string to a numpy array
Args:
s (str): The string
sep (str): The separator in the string.
Returns:
(`numpy.ndarray`): The 2D array with the data.
"""
line_len = len(np.fromstring(s[:s.find('\n')], sep=sep))
return np.reshape(np.fromstring(s, sep=sep), (-1, line_len))
class Mca:
"""
A mca file.
Attributes:
raw(str): The text of the file.
calibration_points (`numpy.ndarray`): The 2D array with the calibration data or None if there is no such
information.
"""
def __init__(self, file, encoding='iso-8859-15', calibration=None):
"""
Load the content of a mca file.
Args:
file(str): The path to the file.
encoding (str): The encoding to use to read the file.
calibration (str or list): A path to a file from which to read the calibration, or a 2D matrix describing
it.
"""
with open(file, "r", encoding=encoding) as f:
self.raw = f.read()
if calibration is None:
self.calibration_points = self.get_calibration_points()
elif isinstance(calibration, str):
self.calibration_points = Mca(calibration, encoding=encoding).get_calibration_points()
else:
self.calibration_points = np.asarray(calibration)
if self.calibration_points is None:
warnings.warn("Warning: no calibration data was found. Using channel number instead of energy")
def get_section(self, section):
"""
Find the str representing a section in the MCA file.
Args:
section(str): The name of the section to search for.
Returns:
(str): The text of the section or "" if not found.
"""
m = re.match(r"(?:.*)(^<<%s>>$)(.*?)(?:<<.*>>)" % section, self.raw, re.DOTALL + re.MULTILINE)
return m.group(2).strip() if m else ""
def get_variable(self, variable):
"""
Find the str representing a variable in the MCA file.
Args:
variable(str): The name of the variable to search for.
Returns:
(str): The text of the value or "" if not found.
"""
# There are various patterns used in the file, depending on the section
# Just try them all
# Pattern 1: FOO - VALUE
m = re.match(r"(?:.*)%s - (.*?)$" % variable, self.raw, re.DOTALL + re.MULTILINE)
# Pattern 2: FOO=VALUE; EXPLANATION
if not m:
m = re.match(r"(?:.*)%s=(.*?);" % variable, self.raw, re.DOTALL + re.MULTILINE)
# Pattern 3: FOO: VALUE
if not m:
m = re.match(r"(?:.*)%s: (.*?)$" % variable, self.raw, re.DOTALL + re.MULTILINE)
return m.group(1).strip() if m else ""
def get_calibration_points(self):
"""
Get the calibration points from the MCA file, regardless of the calibration parameter used to create the object.
Returns:
(`numpy.ndarray`): The 2D array with the data or None if there is no calibration data.
"""
cal = self.get_section("CALIBRATION")
if not cal:
return
cal = cal[cal.find('\n') + 1:] # remove first line
return _str_to_array(cal)
def get_calibration_function(self, method=None):
"""
Get a calibration function from the file.
Args:
method(str): The method to use. Available methods include:
* 'bestfit': A linear fit in the sense of least-squares (default).
* 'interpolation': Use a linear interpolation.
Returns:
(`Callable`): A function mapping channel number to energy. Note the first channel is number 0.
"""
points = self.calibration_points
if points is None:
# User was already warned when the object was created.
return np.vectorize(lambda x: x)
info = sys.version_info
if info[0] == 3 and info[1] < 4 or info[0] == 2 and info[1] < 7: # py2 < 2.7 or py3 < 3.4
extrapolation_support = False
else:
extrapolation_support = True
if method is None:
method = "bestfit"
if method == "interpolation" and not extrapolation_support:
warnings.warn("Warning: extrapolation not supported with active Python interpreter. Using best fit instead")
method = "bestfit"
if method == "interpolation":
return interpolate.interp1d(points[:, 0], points[:, 1], fill_value="extrapolate")
elif method == "bestfit":
slope, intercept, _, _, _ = linregress(points[:, 0], points[:, 1])
return np.vectorize(lambda x: slope * x + intercept)
else:
raise ValueError("Unknown method: %s" % method)
def get_points(self, calibration_method=None, trim_zeros=True, background=None):
"""
Get the points of the spectrum.
Args:
calibration_method (str): The method used for the calibration. See `get_calibration_function`.
trim_zeros (bool): Whether to remove values with no counts.
background (`Mca`): A spectrum describing a background to subtract from the returned points. The background
is scaled using the REAL_TIME parameters.
Returns:
(tuple): tuple containing:
x (List[float]): The list of x coordinates (mean bin energy).
y (List[float]): The list of y coordinates (counts in each bin).
"""
f = self.get_calibration_function(method=calibration_method)
yy = _str_to_array(self.get_section("DATA"))[:, 0]
if background:
background_yy = _str_to_array(background.get_section("DATA"))[:, 0]
yy -= background_yy * (float(self.get_variable("REAL_TIME")) / float(background.get_variable("REAL_TIME")))
xx = f(range(len(yy)))
if trim_zeros:
yy = np.trim_zeros(yy, 'f')
xx = xx[len(xx) - len(yy):]
yy = np.trim_zeros(yy, 'b')
removed_count = len(yy) - len(xx)
if removed_count: # Then removed_count is negative
xx = xx[:len(yy) - len(xx)]
return xx, yy
def get_counts(self, calibration_method=None, background=None):
"""
Get the number of counts in the spectrum.
Args:
calibration_method (str): The method used for the calibration. See `get_calibration_function`.
background (`Mca`): A spectrum describing a background to subtract from the returned points. The background
is scaled using the REAL_TIME parameters.
Returns:
(float): Number of counts in the spectrum.
"""
xx, yy = self.get_points(calibration_method=calibration_method, background=background)
return sum(yy)
def get_total_energy(self, calibration_method=None, background=None):
"""
Get the total energy in the spectrum.
Args:
calibration_method (str): The method used for the calibration. See `get_calibration_function`.
background (`Mca`): A spectrum describing a background to subtract from the returned points. The background
is scaled using the REAL_TIME parameters.
Returns:
(float): Total energy of counts in the spectrum, in the units set in the calibration.
If there is no calibration available, a meaningless number is returned.
"""
xx, yy = self.get_points(calibration_method=calibration_method, background=background)
return sum([x * y for x, y in zip(xx, yy)])
def plot(self, log_y=False, log_x=False, calibration_method=None, background=None):
"""
Show a plot of the spectrum.
Args:
log_y(bool): Whether the y-axis is in logarithmic scale.
log_x(bool): Whether the x-axis is in logarithmic scale.
calibration_method (str): The method used for the calibration. See `get_calibration_function`.
background (`Mca`): A spectrum describing a background to subtract from the returned points. The background
is scaled using the REAL_TIME parameters.
"""
xx, yy = self.get_points(calibration_method=calibration_method, background=background)
if log_y and log_x:
plt.loglog(xx, yy)
elif log_y:
plt.semilogy(xx, yy)
elif log_x:
plt.semilogx(xx, yy)
else:
plt.plot(xx, yy)
plt.show()
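# Usage sketch (hypothetical file names): load a spectrum, optionally with a separate
# calibration file, then inspect it through the public API defined above.
#   >>> spectrum = Mca("run01.mca", calibration="calibration.mca")
#   >>> counts = spectrum.get_counts()
#   >>> spectrum.plot(log_y=True)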
|
lgpl-3.0
|
mwv/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
247
|
3846
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
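# The normalized Ricker wavelet implemented above is
#   psi(x) = 2 / (sqrt(3*a) * pi**(1/4)) * (1 - (x - c)**2 / a**2) * exp(-(x - c)**2 / (2*a**2))
# with width a and center c; note the pi**(1/4) (not pi/4) factor in the normalization.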
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
bsd-3-clause
|
ctralie/SlidingWindowVideoTDA
|
DoublePendulum.py
|
1
|
2021
|
# Python Example
# Double pendulum formula translated from the C code at
# http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
G = 9.8 # acceleration due to gravity, in m/s^2
L1 = 1.0 # length of pendulum 1 in m
L2 = 1.0 # length of pendulum 2 in m
M1 = 1.0 # mass of pendulum 1 in kg
M2 = 1.0 # mass of pendulum 2 in kg
def derivs(state, t):
dydx = np.zeros_like(state)
dydx[0] = state[1]
del_ = state[2] - state[0]
den1 = (M1 + M2)*L1 - M2*L1*cos(del_)*cos(del_)
dydx[1] = (M2*L1*state[1]*state[1]*sin(del_)*cos(del_) +
M2*G*sin(state[2])*cos(del_) +
M2*L2*state[3]*state[3]*sin(del_) -
(M1 + M2)*G*sin(state[0]))/den1
dydx[2] = state[3]
den2 = (L2/L1)*den1
dydx[3] = (-M2*L2*state[3]*state[3]*sin(del_)*cos(del_) +
(M1 + M2)*G*sin(state[0])*cos(del_) -
(M1 + M2)*L1*state[1]*state[1]*sin(del_) -
(M1 + M2)*G*sin(state[2]))/den2
return dydx
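# State vector convention used by derivs: state = [theta1, omega1, theta2, omega2],
# i.e. the angle and angular velocity of each arm (radians, rad/s); den1 and den2 are
# the shared denominators of the standard double-pendulum equations of motion.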
# create a time array from 0..20 sampled at 0.05 second steps
dt = 0.05
t = np.arange(0.0, 20, dt)
# th1 and th2 are the initial angles (degrees)
# w1 and w2 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0
# initial state
state = np.radians([th1, w1, th2, w2])
# integrate the ODE using scipy.integrate.odeint
y = integrate.odeint(derivs, state, t)
x1 = L1*sin(y[:, 0])
y1 = -L1*cos(y[:, 0])
x2 = L2*sin(y[:, 2]) + x1
y2 = -L2*cos(y[:, 2]) + y1
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
ax.grid()
line, = ax.plot([], [], 'o-', lw=40)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
plt.axis('off')
for i in range(len(t)):
line.set_data([0, x1[i], x2[i]], [0, y1[i], y2[i]])
plt.savefig("DoublePendulum/%i.png"%i)
|
apache-2.0
|
hawkw/sqlviz
|
sqlviz.py
|
1
|
4672
|
#! /usr/bin/env python3
"""SQLViz
Usage:
sqlviz [-hnkdlpo DIR] <file>
Options:
-h --help Display this help file
-k --keys Graph the number of foreign keys vs primary keys
-d --datatypes Graph the distribution of datatypes
-l --lengths Graph the distribution of lengths for each data type
-p --print Print text to the console as well as creating graphs
-n --no-display Don't display the generated graphs.
-o DIR --output=DIR Output graphs to the specified directory
"""
from docopt import docopt
from matplotlib import pyplot
import re
class Schema:
"""
Wraps the SQL source code for a schema and provides methods to get information about that schema.
"""
table_def = re.compile(r"CREATE TABLE|create table")
primary_key = re.compile(r"PRIMARY KEY|primary key")
foreign_key = re.compile(r"FOREIGN KEY|foreign key")
varchar = re.compile(r"(?:VARCHAR|varchar)\s*\((\d+)\)")
decimal = re.compile(r"(?:DECIMAL|decimal)\s*\((\d+,\d+)\)")
decimal_extract = re.compile(r"(?P<p>\d+),\s*(?P<d>\d+)")
integer = re.compile(r"(INT|int|INTEGER|integer)(\s|,)")
text = re.compile(r"(TEXT|text)(\s|,)")
numeric = re.compile(r"(?:NUMERIC|numeric)\s*\((\d+,\s*\d+)\)")
def __init__(self, source):
"""
Creates a new instance of Schema for the specified source code string.
"""
self.source = source
def n_tables(self):
"""
Returns the number of tables defined in the schema
"""
return len(Schema.table_def.findall(self.source))
def n_keys(self):
"""
Returns the number of keys defined in the schema
"""
return {"PRIMARY KEY": len(Schema.primary_key.findall(self.source)),
"FOREIGN KEY": len(Schema.foreign_key.findall(self.source))}
def n_datatypes(self):
"""
Returns the number of each data type in the schema.
"""
return {"INT": len(Schema.integer.findall(self.source)),
"DECIMAL": len(Schema.decimal.findall(self.source)),
"NUMERIC": len(Schema.numeric.findall(self.source)),
"VARCHAR": len(Schema.varchar.findall(self.source)),
"TEXT": len(Schema.text.findall(self.source))}
def lengths(self):
"""
Returns a dictionary mapping each data type in the schema
to a list of the lengths of those data types.
"""
return {"VARCHAR": [int(v) for v in Schema.varchar.findall(self.source)],
"DECIMAL": [(int(d.group("p")), int(d.group("d"))) for d in map(Schema.decimal_extract.search,Schema.decimal.findall(self.source))],
"NUMERIC": [(int(d.group("p")), int(d.group("d"))) for d in map(Schema.decimal_extract.search,Schema.numeric.findall(self.source))]
}
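# Usage sketch (hypothetical schema file): Schema only needs the raw SQL text.
#   schema = Schema(open("shop.sql").read())
#   print(schema.n_tables(), schema.n_keys(), schema.n_datatypes())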
if __name__ == "__main__":
opts = docopt(__doc__, help=True, version="0.1")
with open(opts["<file>"], 'r') as f:
source = f.read()
schema = Schema(source)
# Begin plotting
fignum = 0
# pie chart of keys
if opts["--keys"]: # pie chart of keys
fignum = fignum + 1
pyplot.figure(fignum, figsize=(6,6))
pyplot.ax = pyplot.axes([0.1, 0.1, 0.8, 0.8])
keys = schema.n_keys()
total_keys = keys["PRIMARY KEY"] + keys["FOREIGN KEY"]
fracs = [ # determine fractions of primary/foreign
(keys["PRIMARY KEY"]/total_keys)*100, (keys["FOREIGN KEY"]/total_keys)*100,
]
pyplot.pie(fracs, labels = ["primary", "foreign"], autopct='%1.1f%%')
pyplot.title("Key Composition")
if not opts["--no-display"]:
pyplot.show()
if opts["--output"]:
pyplot.savefig((opts["--output"] + "/keys.pdf"))
# pie chart of datatypes
if opts["--datatypes"]: # pie chart of datatypes
fignum = fignum + 1
pyplot.figure(fignum, figsize=(6,6))
pyplot.ax = pyplot.axes([0.1, 0.1, 0.8, 0.8])
datatypes = schema.n_datatypes()
n_d = {}
for d in datatypes: # remove zero values
if datatypes[d] != 0:
n_d[d] = datatypes[d]
datatypes = n_d
total_datatypes = sum(datatypes.values())
fracs = [ (v/total_datatypes) * 100 for v in datatypes.values() ]
pyplot.pie(fracs, labels = datatypes.keys(), autopct='%1.1f%%')
pyplot.title("Datatype Composition")
if not opts["--no-display"]:
pyplot.show()
if opts["--output"]:
pyplot.savefig((opts["--output"] + "/datatypes.pdf"))
|
mit
|
startcode/apollo
|
modules/tools/mapshow/plot_smoothness.py
|
2
|
1267
|
import rospy
from modules.planning.proto import planning_pb2
import matplotlib.pyplot as plt
from planning import Planning
import matplotlib.animation as animation
from subplot_traj_speed import TrajSpeedSubplot
from subplot_traj_acc import TrajAccSubplot
from subplot_traj_path import TrajPathSubplot
planning = Planning()
def update(frame_number):
traj_speed_subplot.show(planning)
traj_acc_subplot.show(planning)
traj_path_subplot.show(planning)
def planning_callback(planning_pb):
planning.update_planning_pb(planning_pb)
planning.compute_traj_data()
def add_listener():
rospy.init_node('st_plot', anonymous=True)
rospy.Subscriber('/apollo/planning', planning_pb2.ADCTrajectory,
planning_callback)
def press_key():
pass
if __name__ == '__main__':
add_listener()
fig = plt.figure(figsize=(14, 6))
fig.canvas.mpl_connect('key_press_event', press_key)
ax = plt.subplot2grid((2, 2), (0, 0))
traj_speed_subplot = TrajSpeedSubplot(ax)
ax2 = plt.subplot2grid((2, 2), (0, 1))
traj_acc_subplot = TrajAccSubplot(ax2)
ax3 = plt.subplot2grid((2, 2), (1, 0))
traj_path_subplot = TrajPathSubplot(ax3)
ani = animation.FuncAnimation(fig, update, interval=100)
plt.show()
|
apache-2.0
|
ClinicalGraphics/scikit-image
|
doc/examples/edges/plot_convex_hull.py
|
9
|
1487
|
"""
===========
Convex Hull
===========
The convex hull of a binary image is the set of pixels included in the
smallest convex polygon that surrounds all white pixels in the input.
In this example, we show how the input pixels (white) get filled in by the
convex hull (white and grey).
A good overview of the algorithm is given on `Steve Eddin's blog
<http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/>`__.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import convex_hull_image
image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=float)
original_image = np.copy(image)
chull = convex_hull_image(image)
image[chull] += 1
# image is now:
# [[ 0. 0. 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 2. 0. 0. 0. 0.]
# [ 0. 0. 0. 2. 1. 2. 0. 0. 0.]
# [ 0. 0. 2. 1. 1. 1. 2. 0. 0.]
# [ 0. 2. 1. 1. 1. 1. 1. 2. 0.]
# [ 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
ax1.set_title('Original picture')
ax1.imshow(original_image, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_xticks([]), ax1.set_yticks([])
ax2.set_title('Transformed picture')
ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax2.set_xticks([]), ax2.set_yticks([])
plt.show()
|
bsd-3-clause
|
giorgiop/scikit-learn
|
examples/svm/plot_separating_hyperplane_unbalanced.py
|
329
|
1850
|
"""
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
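# Hedged variant (sketch only, following the note in the docstring above): the
# same figure can be produced with a linear SGD classifier in place of the SVC,
# e.g.
#   from sklearn.linear_model import SGDClassifier
#   clf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01)
#   wclf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01, class_weight={1: 10})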
|
bsd-3-clause
|
mdespriee/spark
|
python/pyspark/sql/tests/test_arrow.py
|
4
|
19752
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import threading
import time
import unittest
import warnings
from pyspark.sql import Row
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
from pyspark.util import _exception_message
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
from distutils.version import LooseVersion
import pyarrow as pa
super(ArrowTests, cls).setUpClass()
cls.warnings_lock = threading.Lock()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion("0.10.0") <= LooseVersion(pa.__version__):
cls.schema.add(StructField("9_binary_t", BinaryType(), True))
cls.data[0] = cls.data[0] + (bytearray(b"a"),)
cls.data[1] = cls.data[1] + (bytearray(b"bb"),)
cls.data[2] = cls.data[2] + (bytearray(b"ccc"),)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
super(ArrowTests, cls).tearDownClass()
def create_pandas_data_frame(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
from distutils.version import LooseVersion
import pyarrow as pa
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
schema = StructType([StructField("binary", BinaryType(), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type.*BinaryType'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_la, pdf_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
# Correct result_la by adjusting 3 hours difference between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[0], fields[7] = fields[7], fields[0] # swap str with timestamp
wrong_schema = StructType(fields)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
# Some series get converted for Spark to consume, this makes sure input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
# Integers with nulls will get NaNs filled with 0 and will be casted
pdf.ix[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type.*BinaryType'):
self.spark.createDataFrame(
pd.DataFrame([[{'a': b'aaa'}]]), "a: binary")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
import pandas as pd
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df_from_python.toPandas())
self.assertPandasEqual(pdf, df_from_pandas.toPandas())
def test_toPandas_batch_order(self):
def delay_first_part(partition_index, iterator):
if partition_index == 0:
time.sleep(0.1)
return iterator
# Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
def run_test(num_records, num_parts, max_records, use_delay=False):
df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
if use_delay:
df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf, pdf_arrow)
cases = [
(1024, 512, 2), # Use large num partitions for more likely collecting out of order
(64, 8, 2, True), # Use delay in first partition to force collecting out of order
(64, 64, 1), # Test single batch per partition
(64, 1, 64), # Test single partition, single batch
(64, 1, 8), # Test single partition, multiple batches
(30, 7, 2), # Test different sized partitions
]
for case in cases:
run_test(*case)
class EncryptionArrowTests(ArrowTests):
@classmethod
def conf(cls):
return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true")
if __name__ == "__main__":
from pyspark.sql.tests.test_arrow import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
CamDavidsonPilon/lifelines
|
lifelines/calibration.py
|
1
|
4092
|
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from lifelines.utils import CensoringType
from lifelines.fitters import RegressionFitter
from lifelines import CRCSplineFitter
def survival_probability_calibration(model: RegressionFitter, df: pd.DataFrame, t0: float, ax=None):
r"""
Smoothed calibration curves for time-to-event models. This is analogous to
calibration curves for classification models, extended to handle survival probabilities
and censoring. Produces a matplotlib figure and some metrics.
We want to calibrate our model's prediction of :math:`P(T < \text{t0})` against the observed frequencies.
Parameters
-------------
model:
a fitted lifelines regression model to be evaluated
df: DataFrame
a DataFrame - if equal to the training data, then this is an in-sample calibration. Could also be an out-of-sample
dataset.
t0: float
        the time at which to evaluate :math:`P(T < \text{t0})`, i.e. the probability of the
        event occurring before t0.
Returns
----------
ax:
mpl axes
ICI:
mean absolute difference between predicted and observed
E50:
median absolute difference between predicted and observed
https://onlinelibrary.wiley.com/doi/full/10.1002/sim.8570
"""
def ccl(p):
return np.log(-np.log(1 - p))
if ax is None:
ax = plt.gca()
T = model.duration_col
E = model.event_col
predictions_at_t0 = np.clip(1 - model.predict_survival_function(df, times=[t0]).T.squeeze(), 1e-10, 1 - 1e-10)
# create new dataset with the predictions
prediction_df = pd.DataFrame({"ccl_at_%d" % t0: ccl(predictions_at_t0), T: df[T], E: df[E]})
# fit new dataset to flexible spline model
    # this new model connects prediction probabilities and actual survival. It should be
    # very flexible, almost to the point of overfitting. Its goal is just to smooth out the data!
knots = 3
regressors = {"beta_": ["ccl_at_%d" % t0], "gamma0_": "1", "gamma1_": "1", "gamma2_": "1"}
# this model is from examples/royson_crowther_clements_splines.py
crc = CRCSplineFitter(knots, penalizer=0.000001)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
if CensoringType.is_right_censoring(model):
crc.fit_right_censoring(prediction_df, T, E, regressors=regressors)
elif CensoringType.is_left_censoring(model):
crc.fit_left_censoring(prediction_df, T, E, regressors=regressors)
elif CensoringType.is_interval_censoring(model):
crc.fit_interval_censoring(prediction_df, T, E, regressors=regressors)
# predict new model at values 0 to 1, but remember to ccl it!
x = np.linspace(np.clip(predictions_at_t0.min() - 0.01, 0, 1), np.clip(predictions_at_t0.max() + 0.01, 0, 1), 100)
y = 1 - crc.predict_survival_function(pd.DataFrame({"ccl_at_%d" % t0: ccl(x)}), times=[t0]).T.squeeze()
# plot our results
ax.set_title("Smoothed calibration curve of \npredicted vs observed probabilities of t ≤ %d mortality" % t0)
color = "tab:red"
ax.plot(x, y, label="smoothed calibration curve", color=color)
ax.set_xlabel("Predicted probability of \nt ≤ %d mortality" % t0)
ax.set_ylabel("Observed probability of \nt ≤ %d mortality" % t0, color=color)
ax.tick_params(axis="y", labelcolor=color)
# plot x=y line
ax.plot(x, x, c="k", ls="--")
ax.legend()
# plot histogram of our original predictions
color = "tab:blue"
twin_ax = ax.twinx()
twin_ax.set_ylabel("Count of \npredicted probabilities", color=color) # we already handled the x-label with ax1
twin_ax.tick_params(axis="y", labelcolor=color)
twin_ax.hist(predictions_at_t0, alpha=0.3, bins="sqrt", color=color)
plt.tight_layout()
deltas = ((1 - crc.predict_survival_function(prediction_df, times=[t0])).T.squeeze() - predictions_at_t0).abs()
ICI = deltas.mean()
E50 = np.percentile(deltas, 50)
print("ICI = ", ICI)
print("E50 = ", E50)
return ax, ICI, E50
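if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module): fit a
    # Cox model on the bundled rossi dataset and inspect its calibration at
    # t0=25 weeks; the dataset and column names follow the lifelines docs.
    from lifelines import CoxPHFitter
    from lifelines.datasets import load_rossi
    rossi = load_rossi()
    cph = CoxPHFitter().fit(rossi, duration_col="week", event_col="arrest")
    ax, ici, e50 = survival_probability_calibration(cph, rossi, t0=25)
    plt.show()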
|
mit
|
matplotlib/cmocean
|
cmocean/rgb/delta-blue.py
|
2
|
13698
|
from matplotlib.colors import ListedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in pycam02ucs.cm.viscm
parameters = {'xp': [-3.4830597643097576, 0.6289831887553419, -17.483059764309758, -36.149726430976415, -10.119506350699233, -1.5386153198653005],
'yp': [-16.277777777777771, -46.80680359435172, -12.0, -2.6666666666666572, -4.590430134624931, 5.8888888888888857],
'min_Jp': 15.1681957187,
'max_Jp': 98.6544342508}
cm_data = [[ 0.06597739, 0.12386005, 0.24948116],
[ 0.06865758, 0.1266325 , 0.25557624],
[ 0.07132312, 0.12939515, 0.26166391],
[ 0.07396584, 0.13214058, 0.2677948 ],
[ 0.07658629, 0.13486916, 0.27396904],
[ 0.07919242, 0.13758843, 0.28014206],
[ 0.08176925, 0.14028419, 0.28640499],
[ 0.08433407, 0.14297318, 0.29265629],
[ 0.08687299, 0.14564215, 0.29898019],
[ 0.08939564, 0.14830082, 0.30531941],
[ 0.09189721, 0.15094484, 0.311703 ],
[ 0.09437767, 0.1535746 , 0.31813071],
[ 0.09684032, 0.15619402, 0.32458157],
[ 0.09927797, 0.15879644, 0.33109756],
[ 0.10169939, 0.16139154, 0.33762358],
[ 0.10409298, 0.1639684 , 0.34422672],
[ 0.10647003, 0.16653949, 0.35083602],
[ 0.10881765, 0.16909286, 0.35752405],
[ 0.11114624, 0.1716403 , 0.36422476],
[ 0.11344526, 0.17417268, 0.37099405],
[ 0.11572015, 0.17669703, 0.37779407],
[ 0.11796711, 0.17921142, 0.38463932],
[ 0.12018172, 0.18171361, 0.39154591],
[ 0.12237222, 0.18421369, 0.39845983],
[ 0.12451836, 0.18669519, 0.40547913],
[ 0.12663654, 0.18917624, 0.41250469],
[ 0.12871435, 0.19164859, 0.41958831],
[ 0.13074695, 0.19411179, 0.42673806],
[ 0.13274084, 0.19657524, 0.43390608],
[ 0.13468282, 0.19903239, 0.44113668],
[ 0.13656674, 0.2014837 , 0.44843501],
[ 0.13839602, 0.20393744, 0.4557622 ],
[ 0.14016368, 0.20639447, 0.46312314],
[ 0.14185409, 0.20885126, 0.47055044],
[ 0.14345986, 0.21131167, 0.47803591],
[ 0.14497642, 0.2137823 , 0.485557 ],
[ 0.146391 , 0.21626634, 0.49311434],
[ 0.14768864, 0.218768 , 0.50070638],
[ 0.14885173, 0.22129285, 0.50832846],
[ 0.14985965, 0.22384818, 0.51597154],
[ 0.1506884 , 0.2264434 , 0.52362079],
[ 0.15131023, 0.22909058, 0.53125369],
[ 0.15168425, 0.23180303, 0.53885457],
[ 0.15175702, 0.23459819, 0.54640144],
[ 0.15150762, 0.23750367, 0.55379602],
[ 0.15085272, 0.24054407, 0.56100954],
[ 0.149778 , 0.24375297, 0.56790391],
[ 0.1482413 , 0.24715977, 0.57437788],
[ 0.14626774, 0.25078506, 0.58029534],
[ 0.14393556, 0.25462901, 0.58555075],
[ 0.14135408, 0.25866955, 0.59010335],
[ 0.13864079, 0.26286871, 0.59397852],
[ 0.13589174, 0.26718438, 0.59724867],
[ 0.13319563, 0.27157423, 0.60000635],
[ 0.13058114, 0.2760103 , 0.60234289],
[ 0.12806719, 0.28047115, 0.60433583],
[ 0.12569199, 0.28493491, 0.60605594],
[ 0.12343537, 0.28939604, 0.60755039],
[ 0.12131237, 0.29384382, 0.60886431],
[ 0.1193314 , 0.29827105, 0.61003449],
[ 0.11747335, 0.3026784 , 0.61108156],
[ 0.11574105, 0.30706267, 0.61202824],
[ 0.11415261, 0.31141804, 0.61290024],
[ 0.11269166, 0.31574724, 0.61370617],
[ 0.11135538, 0.32005035, 0.61445721],
[ 0.11014377, 0.32432716, 0.6151638 ],
[ 0.10905648, 0.32857778, 0.61583463],
[ 0.10809281, 0.3328026 , 0.61647697],
[ 0.10725185, 0.33700217, 0.61709693],
[ 0.10653241, 0.34117717, 0.61769964],
[ 0.10593314, 0.34532836, 0.61828945],
[ 0.1054525 , 0.34945656, 0.61887003],
[ 0.10508879, 0.35356261, 0.61944452],
[ 0.10484013, 0.35764738, 0.62001559],
[ 0.10470452, 0.36171173, 0.62058554],
[ 0.10467981, 0.36575652, 0.62115635],
[ 0.10476371, 0.36978259, 0.62172971],
[ 0.10495382, 0.37379076, 0.62230709],
[ 0.10524762, 0.37778184, 0.62288978],
[ 0.10564252, 0.3817566 , 0.62347886],
[ 0.1061358 , 0.38571579, 0.62407531],
[ 0.10672471, 0.38966014, 0.62467996],
[ 0.10740642, 0.39359035, 0.62529352],
[ 0.10817806, 0.39750711, 0.62591662],
[ 0.10903675, 0.40141105, 0.62654981],
[ 0.10998202, 0.40530231, 0.62719516],
[ 0.11100819, 0.40918206, 0.62785128],
[ 0.11211222, 0.41305092, 0.62851837],
[ 0.11329127, 0.41690943, 0.62919672],
[ 0.11454253, 0.42075813, 0.62988653],
[ 0.11586327, 0.42459754, 0.63058799],
[ 0.11725083, 0.42842816, 0.63130122],
[ 0.1187026 , 0.43225046, 0.63202632],
[ 0.12021702, 0.4360647 , 0.63276403],
[ 0.12179033, 0.43987161, 0.6335134 ],
[ 0.1234202 , 0.44367161, 0.63427436],
[ 0.12510445, 0.4474651 , 0.6350469 ],
[ 0.12684102, 0.45125247, 0.63583095],
[ 0.12862799, 0.45503408, 0.63662642],
[ 0.13046351, 0.45881028, 0.6374332 ],
[ 0.13234563, 0.46258148, 0.63825092],
[ 0.13427263, 0.46634802, 0.63907929],
[ 0.13624311, 0.4701102 , 0.63991816],
[ 0.13825578, 0.47386832, 0.64076733],
[ 0.14030945, 0.47762263, 0.64162657],
[ 0.14240307, 0.48137341, 0.64249562],
[ 0.14453572, 0.48512091, 0.64337422],
[ 0.14670608, 0.48886546, 0.64426159],
[ 0.14891319, 0.49260737, 0.64515709],
[ 0.15115711, 0.49634669, 0.64606096],
[ 0.15343746, 0.50008362, 0.64697283],
[ 0.15575396, 0.50381833, 0.64789234],
[ 0.15810648, 0.50755096, 0.64881909],
[ 0.160495 , 0.51128167, 0.64975267],
[ 0.16291965, 0.51501059, 0.65069263],
[ 0.16538066, 0.51873781, 0.6516385 ],
[ 0.16787709, 0.52246378, 0.65258827],
[ 0.17041079, 0.52618823, 0.65354289],
[ 0.17298246, 0.52991124, 0.65450185],
[ 0.17559291, 0.53363281, 0.65546464],
[ 0.1782431 , 0.53735298, 0.65643069],
[ 0.18093416, 0.54107173, 0.65739946],
[ 0.18366735, 0.54478903, 0.65837034],
[ 0.18644408, 0.54850484, 0.65934275],
[ 0.18926596, 0.55221906, 0.66031605],
[ 0.19213472, 0.5559316 , 0.66128961],
[ 0.19505228, 0.55964231, 0.66226278],
[ 0.19802076, 0.56335105, 0.66323489],
[ 0.20104242, 0.5670576 , 0.66420528],
[ 0.20411976, 0.57076173, 0.66517325],
[ 0.20725543, 0.57446319, 0.66613812],
[ 0.21045232, 0.57816165, 0.6670992 ],
[ 0.2137135 , 0.58185676, 0.66805579],
[ 0.21704227, 0.58554813, 0.6690072 ],
[ 0.22044214, 0.58923531, 0.66995277],
[ 0.22391686, 0.59291779, 0.67089183],
[ 0.22747037, 0.59659503, 0.67182375],
[ 0.23110686, 0.6002664 , 0.67274791],
[ 0.23483073, 0.60393124, 0.67366376],
[ 0.2386466 , 0.60758879, 0.67457077],
[ 0.24255928, 0.61123826, 0.67546847],
[ 0.24657379, 0.61487875, 0.67635647],
[ 0.25069532, 0.61850931, 0.67723446],
[ 0.25492963, 0.62212933, 0.67809791],
[ 0.25928228, 0.62573723, 0.67895065],
[ 0.26375886, 0.62933183, 0.67979274],
[ 0.26836502, 0.63291182, 0.68062438],
[ 0.27310635, 0.63647586, 0.68144595],
[ 0.27799067, 0.64002284, 0.68225204],
[ 0.28302187, 0.64355084, 0.68304906],
[ 0.28820489, 0.64705829, 0.68383831],
[ 0.29354581, 0.65054365, 0.68461866],
[ 0.29905039, 0.65400523, 0.68538993],
[ 0.30471967, 0.65744127, 0.68615859],
[ 0.31055832, 0.66085014, 0.68692427],
[ 0.31656915, 0.66423017, 0.68768878],
[ 0.32274951, 0.66757995, 0.68845924],
[ 0.32910283, 0.67089793, 0.68923459],
[ 0.33562308, 0.67418311, 0.69002275],
[ 0.34230732, 0.67743451, 0.69082687],
[ 0.34914976, 0.68065146, 0.69165158],
[ 0.35614127, 0.68383379, 0.69250293],
[ 0.36327369, 0.68698145, 0.69338491],
[ 0.37053484, 0.69009497, 0.6943034 ],
[ 0.37791226, 0.69317522, 0.69526327],
[ 0.38539385, 0.69622324, 0.69626827],
[ 0.39296332, 0.69924082, 0.697324 ],
[ 0.4006094 , 0.70222938, 0.69843223],
[ 0.40831594, 0.70519122, 0.69959724],
[ 0.41607015, 0.7081284 , 0.7008209 ],
[ 0.42386201, 0.71104276, 0.70210342],
[ 0.43167256, 0.71393771, 0.70344961],
[ 0.43949947, 0.71681422, 0.70485557],
[ 0.44732849, 0.71967527, 0.70632366],
[ 0.45514951, 0.72252334, 0.70785413],
[ 0.46296115, 0.72535947, 0.70944335],
[ 0.47075349, 0.72818626, 0.71109189],
[ 0.47851694, 0.73100635, 0.71280025],
[ 0.48625412, 0.73382019, 0.71456376],
[ 0.49396102, 0.73662941, 0.716381 ],
[ 0.50163106, 0.73943622, 0.71825179],
[ 0.50925965, 0.74224248, 0.72017521],
[ 0.51685029, 0.74504841, 0.72214731],
[ 0.52440134, 0.74785523, 0.72416643],
[ 0.53191155, 0.75066408, 0.72623094],
[ 0.53938005, 0.753476 , 0.72833923],
[ 0.54680444, 0.75629239, 0.73049037],
[ 0.55418394, 0.75911426, 0.73268299],
[ 0.5615215 , 0.76194178, 0.73491438],
[ 0.56881727, 0.7647757 , 0.73718308],
[ 0.57607152, 0.76761672, 0.73948764],
[ 0.58328469, 0.7704655 , 0.74182668],
[ 0.5904573 , 0.77332263, 0.74419886],
[ 0.59758995, 0.77618869, 0.74660289],
[ 0.60468333, 0.77906419, 0.7490375 ],
[ 0.61173817, 0.78194964, 0.75150151],
[ 0.61875522, 0.7848455 , 0.75399375],
[ 0.62573528, 0.7877522 , 0.7565131 ],
[ 0.63267915, 0.79067016, 0.75905849],
[ 0.63958763, 0.79359978, 0.76162887],
[ 0.64646151, 0.79654145, 0.76422323],
[ 0.6533016 , 0.79949552, 0.76684059],
[ 0.66010867, 0.80246236, 0.76948001],
[ 0.66688348, 0.80544232, 0.77214056],
[ 0.67362676, 0.80843573, 0.77482134],
[ 0.68033922, 0.81144292, 0.77752145],
[ 0.68702155, 0.81446423, 0.78024002],
[ 0.69367439, 0.81749998, 0.7829762 ],
[ 0.7002975 , 0.82055075, 0.78572944],
[ 0.70689165, 0.82361681, 0.78849881],
[ 0.71345823, 0.82669826, 0.79128316],
[ 0.7199978 , 0.82979542, 0.79408159],
[ 0.72651088, 0.83290861, 0.7968932 ],
[ 0.73299796, 0.83603816, 0.79971707],
[ 0.73945951, 0.8391844 , 0.80255223],
[ 0.74589598, 0.84234767, 0.8053977 ],
[ 0.7523078 , 0.84552831, 0.80825242],
[ 0.75869517, 0.84872673, 0.81111536],
[ 0.76505775, 0.85194348, 0.81398568],
[ 0.77139709, 0.85517859, 0.81686163],
[ 0.77771359, 0.85843239, 0.81974187],
[ 0.78400765, 0.86170526, 0.82262493],
[ 0.79027967, 0.86499755, 0.82550921],
[ 0.79653012, 0.8683096 , 0.82839299],
[ 0.8027595 , 0.87164177, 0.83127437],
[ 0.80896806, 0.87499448, 0.83415141],
[ 0.8151567 , 0.87836796, 0.83702171],
[ 0.82132643, 0.88176244, 0.83988264],
[ 0.82747812, 0.88517819, 0.84273144],
[ 0.83361281, 0.88861542, 0.8455651 ],
[ 0.83973174, 0.89207431, 0.84838033],
[ 0.84583638, 0.89555495, 0.85117353],
[ 0.85192854, 0.89905734, 0.85394079],
[ 0.85801043, 0.90258135, 0.85667786],
[ 0.86408444, 0.90612679, 0.85938032],
[ 0.87015339, 0.9096933 , 0.86204351],
[ 0.87622047, 0.9132804 , 0.8646626 ],
[ 0.88228934, 0.9168874 , 0.86723268],
[ 0.88836402, 0.92051348, 0.86974885],
[ 0.89444909, 0.92415756, 0.87220619],
[ 0.90054949, 0.92781839, 0.87459995],
[ 0.90666975, 0.93149475, 0.87692634],
[ 0.91281478, 0.93518523, 0.87918186],
[ 0.91898934, 0.93888836, 0.88136368],
[ 0.92519787, 0.94260271, 0.88346979],
[ 0.93144429, 0.94632694, 0.88549899],
[ 0.93773184, 0.95005986, 0.88745092],
[ 0.94406341, 0.95380038, 0.88932539],
[ 0.95044015, 0.95754793, 0.89112376],
[ 0.95686223, 0.96130226, 0.89284794],
[ 0.963329 , 0.96506343, 0.8944999 ],
[ 0.96983887, 0.96883187, 0.89608183],
[ 0.97638938, 0.97260834, 0.89759591],
[ 0.98297732, 0.97639393, 0.89904421],
[ 0.98959887, 0.98019003, 0.90042859],
[ 0.99624974, 0.98399826, 0.90175064]]
test_cm = ListedColormap(cm_data, name=__file__)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
|
mit
|
zingale/pyro2
|
analysis/dam_compare.py
|
2
|
3546
|
#!/usr/bin/env python3
from __future__ import print_function
import numpy as np
from scipy.optimize import brentq
import sys
import os
import matplotlib.pyplot as plt
from util import msg, runparams, io
usage = """
compare the output for a dam problem with the exact solution contained
in dam-exact.out.
usage: ./dam_compare.py file
"""
def abort(string):
print(string)
sys.exit(2)
if not len(sys.argv) == 2:
print(usage)
sys.exit(2)
try:
file1 = sys.argv[1]
except IndexError:
print(usage)
sys.exit(2)
sim = io.read(file1)
myd = sim.cc_data
myg = myd.grid
# time of file
t = myd.t
if myg.nx > myg.ny:
# x-problem
xmin = myg.xmin
xmax = myg.xmax
param_file = "inputs.dam.x"
else:
# y-problem
xmin = myg.ymin
xmax = myg.ymax
param_file = "inputs.dam.y"
height = myd.get_var("height")
xmom = myd.get_var("x-momentum")
ymom = myd.get_var("y-momentum")
# get the 1-d profile from the simulation data -- assume that whichever
# coordinate is the longer one is the direction of the problem
# parameter defaults
rp = runparams.RuntimeParameters()
rp.load_params("../_defaults")
rp.load_params("../swe/_defaults")
rp.load_params("../swe/problems/_dam.defaults")
# now read in the inputs file
if not os.path.isfile(param_file):
# check if the param file lives in the solver's problems directory
param_file = "../swe/problems/" + param_file
if not os.path.isfile(param_file):
msg.fail("ERROR: inputs file does not exist")
rp.load_params(param_file, no_new=1)
if myg.nx > myg.ny:
# x-problem
x = myg.x[myg.ilo:myg.ihi+1]
jj = myg.ny//2
h = height[myg.ilo:myg.ihi+1, jj]
u = xmom[myg.ilo:myg.ihi+1, jj]/h
ut = ymom[myg.ilo:myg.ihi+1, jj]/h
else:
# y-problem
x = myg.y[myg.jlo:myg.jhi+1]
ii = myg.nx//2
h = height[ii, myg.jlo:myg.jhi+1]
u = ymom[ii, myg.jlo:myg.jhi+1]/h
ut = xmom[ii, myg.jlo:myg.jhi+1]/h
print(myg)
x_exact = x
h_exact = np.zeros_like(x)
u_exact = np.zeros_like(x)
# find h0, h1
h1 = rp.get_param("dam.h_left")
h0 = rp.get_param("dam.h_right")
def find_h2(h2):
return (h2/h1)**3 - 9*(h2/h1)**2*(h0/h1) + \
16*(h2/h1)**1.5*(h0/h1) - (h2/h1)*(h0/h1)*(h0/h1+8) + \
(h0/h1)**3
h2 = brentq(find_h2, min(h0, h1), max(h0, h1))
# calculate sound speeds
g = rp.get_param("swe.grav")
c0 = np.sqrt(g*h0)
c1 = np.sqrt(g*h1)
c2 = np.sqrt(g*h2)
u2 = 2 * (c1 - c2)
# shock speed
xi = c0 * np.sqrt(1/8 * ((2*(c2/c0)**2 + 1)**2 - 1))
xctr = 0.5*(xmin + xmax)
# h0
idx = x >= xctr + xi*t
h_exact[idx] = h0
u_exact[idx] = 0
# h1
idx = x <= xctr - c1*t
h_exact[idx] = h1
u_exact[idx] = 0
# h2
idx = ((x >= xctr + (u2-c2)*t) & (x < xctr + xi*t))
h_exact[idx] = h2
u_exact[idx] = u2
# h3
idx = ((x >= xctr - c1*t) & (x < xctr + (u2-c2)*t))
c3 = 1/3 * (2*c1 - (x-xctr)/t)
h_exact[idx] = c3[idx]**2 / g
u_exact[idx] = 2 * (c1-c3[idx])
# plot
fig, axes = plt.subplots(nrows=2, ncols=1, num=1)
plt.rc("font", size=10)
ax = axes.flat[0]
ax.plot(x_exact, h_exact, label='Exact')
ax.scatter(x, h, marker="x", s=7, color="r", label='Pyro')
ax.set_ylabel(r"$h$")
ax.set_xlim(0, 1.0)
ax.set_ylim(0, 1.1)
ax = axes.flat[1]
ax.plot(x_exact, u_exact)
ax.scatter(x, u, marker="x", s=7, color="r")
ax.set_ylabel(r"$u$")
ax.set_xlim(0, 1.0)
if (myg.nx > myg.ny):
ax.set_xlabel(r"x")
else:
ax.set_xlabel(r"y")
lgd = axes.flat[0].legend()
plt.subplots_adjust(hspace=0.25)
fig.set_size_inches(8.0, 8.0)
plt.savefig("dam_compare.png", bbox_inches="tight")
|
bsd-3-clause
|
ContinuumIO/blaze
|
blaze/expr/functions.py
|
3
|
3572
|
"""
Dispatches functions like ``sum`` to builtins, numpy, or blaze depending on
input
>>> from blaze import sum, symbol
>>> sum([1, 2, 3])
6
>>> type(sum([1, 2, 3])).__name__
'int'
>>> type(sum(np.array([1, 2, 3], dtype=np.int64))).__name__
'int64'
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> type(sum(t.x)).__name__
'sum'
"""
from __future__ import absolute_import, division, print_function
import math as pymath
from numbers import Number
import numpy as np
import pandas as pd
from multipledispatch import Dispatcher
from ..dispatch import namespace
from ..compatibility import builtins
from . import reductions
from . import math as blazemath
from .core import base
from .expressions import Expr, coalesce
"""
The following code creates reductions equivalent to the following:
@dispatch(Expr)
def sum(expr):
return blaze.sum(expr)
@dispatch(np.ndarray)
def sum(x):
return np.sum(x)
@dispatch(object)
def sum(o):
return builtins.sum(o)
As well as mathematical functions like the following
@dispatch(Expr)
def sqrt(expr):
return blaze.expr.math.sqrt(expr)
@dispatch(np.ndarray)
def sqrt(x):
return np.sqrt(x)
@dispatch(object)
def sqrt(o):
return math.sqrt(o)
"""
math_names = '''abs sqrt sin cos tan sinh cosh tanh acos acosh asin asinh atan atanh
exp log expm1 log10 log1p radians degrees ceil floor trunc isnan'''.split()
binary_math_names = "atan2 copysign fmod hypot ldexp greatest least coalesce".split()
reduction_names = '''any all sum min max mean var std'''.split()
__all__ = math_names + binary_math_names + reduction_names
types = {
builtins: object,
np: (np.ndarray, np.number),
pymath: Number,
blazemath: Expr,
reductions: Expr,
}
binary_types = {
builtins: [(object, object)],
np: [((np.ndarray, np.number), (np.ndarray, np.number))],
pymath: [(Number, Number)],
blazemath: [(Expr, (Expr, base)), (base, Expr)]
}
def _coalesce_objects(lhs, rhs):
# use pd.isnull for None, NaT, and others
return lhs if not pd.isnull(lhs) else rhs
def _coalesce_arrays(lhs, rhs):
    return np.where(pd.isnull(lhs), rhs, lhs)
fallback_binary_mappings = {
'greatest': {
builtins: max,
np: np.maximum,
pymath: max,
},
'least': {
builtins: min,
np: np.minimum,
pymath: min,
},
'coalesce': {
builtins: _coalesce_objects,
np: _coalesce_arrays,
pymath: _coalesce_objects,
blazemath: coalesce,
},
}
for funcname in math_names: # sin, sqrt, ceil, ...
d = Dispatcher(funcname)
for module, typ in types.items():
if hasattr(module, funcname):
d.add((typ,), getattr(module, funcname))
namespace[funcname] = d
locals()[funcname] = d
for funcname in binary_math_names: # hypot, atan2, fmod, ...
d = Dispatcher(funcname)
for module, pairs in binary_types.items():
for pair in pairs:
if hasattr(module, funcname):
d.add(pair, getattr(module, funcname))
elif funcname in fallback_binary_mappings:
assert module in fallback_binary_mappings[funcname], module.__name__
d.add(pair, fallback_binary_mappings[funcname][module])
namespace[funcname] = d
locals()[funcname] = d
for funcname in reduction_names: # any, all, sum, max, ...
d = Dispatcher(funcname)
for module, typ in types.items():
if hasattr(module, funcname):
d.add((typ,), getattr(module, funcname))
namespace[funcname] = d
locals()[funcname] = d
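if __name__ == "__main__":
    # Hedged illustration (not part of the original module): the generated
    # dispatchers route on argument types per the tables above -- scalar pairs
    # fall back to max/_coalesce_objects, ndarray pairs to np.maximum/np.where.
    print(greatest(3, 5))                                # -> 5
    print(greatest(np.array([1, 4]), np.array([3, 2])))  # -> [3 4]
    print(coalesce(None, 5))                             # -> 5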
|
bsd-3-clause
|
tomlof/scikit-learn
|
examples/calibration/plot_compare_calibration.py
|
82
|
5012
|
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
rtrwalker/geotecha
|
geotecha/beam_on_foundation/dingetal2012.py
|
1
|
16769
|
# geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""
Ding et al (2012) "Convergence of Galerkin truncation for dynamic response
of finite beams on nonlinear foundations under a moving load".
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from geotecha.mathematics.root_finding import find_n_roots
from scipy import integrate
from scipy.integrate import odeint
import time
from datetime import timedelta
import datetime
from collections import OrderedDict
import os
class DingEtAl2012(object):
"""Finite elastic Euler-Bernoulli beam resting on non-linear
viscoelastic foundation subjected to a moving load.
An implementation of Ding et al. (2012) [1]_.
You don't need all the parameters. Basically if normalised values are
equal None (i.e. default) then those properties will be calculated from
the non normalised quantities. All calculations are done in normalised
space. You only need the non-normalised variables if you want
non-normalised output.
Parameters
----------
BC : ["SS", "CC", "FF"], optional
Boundary condition. Simply Supported (SS), Clamped Clamped ("CC"),
Free Free (FF). Default BC="SS".
nterms : integer, optional
Number of terms for Galerkin truncation. Default nterms=50
E : float, optional
Young's modulus of beam.(E in [1]_).
rho : float, optional
Mass density of beam.
I : float, optional
Second moment of area of beam (I in [1]_).
A : float, optional
Cross sectional area of beam
L : float, optional
Length of beam. (L in [1]_).
k1 : float, optional
Mean stiffness of foundation.
k3 : float, optional
Non linear stiffness of foundation.
mu : float, optional
Viscous damping of foundation.
Fz : float, optional
Load.
v : float, optional
Speed of the moving force.
v_norm : float, optional
normalised velocity = V * sqrt(rho / E). An example of a consistent
set of units to get the correct v_norm is rho in kg/m^3, L in m, and
E in Pa.
kf : float, optional
Normalised modulus of elasticity = 1 / L * sqrt(I / A).
Fz_norm : float, optional
Normalised load = Fz / (E * A).
mu_norm : float, optional
Normalised damping = mu / A * sqrt(L**2 / (rho * E)).
k1_norm : float, optional
Normalised mean stiffness = k1 * L**2 / (E * A).
k3_norm : float, optional
Normalised non-linear stiffness = k3 * L**4 / (E * A)
nquad : integer, optional
Number of quadrature points for numerical integration of non-linear
k3*w**3*w_ term. Default nquad=30. Note I've had errors when n>35.
For the special case of nquad=None then integration will be performed
using scipy.integrate.quad; this is slower.
Attributes
----------
phi : function
Relevant Galerkin trial function. Depends on `BC`. See [1]_ for
details.
beta : 1d ndarray of `nterms` float
beta terms in Galerkin trial function.
xj : 1d ndarray of `nquad` float
Quadrature integration points.
Ij : 1d ndarray of `nquad` float
Weighting coefficients for numerical integration.
BC_coeff : int
        Coefficient to multiply the Fz and k3 terms in the ODE.
        For `BC`="SS" BC_coeff=2; for `BC`="CC" or "FF" BC_coeff=1.
See [1]_ Equation (31) and (32).
t : 1d ndarray of float
Raw time values. Only valid after running calulate_qk.
t_norm : 1d ndarray of float
        Normalised time values = t / L * sqrt(E / rho).
Only valid after running calulate_qk.
qsol : 2d ndarray of shape(len(`t`), 2* `nterms`) float
Values of Galerkin coefficients at each time i.e. qk(t) in [1]_.
w(x) = sum(qk * phi_k(x)).
Only valid after running calulate_qk.
References
----------
.. [1] Ding, H., Chen, L.-Q., and Yang, S.-P. (2012).
"Convergence of Galerkin truncation for dynamic response of
finite beams on nonlinear foundations under a moving load."
Journal of Sound and Vibration, 331(10), 2426-2442.
"""
def __init__(self,
BC="SS",
nterms=50,
E=None,
I=None,
rho=None,
A=None,
L=None,
k1=None,
k3=None,
mu=None,
Fz=None,
v=None,
v_norm=None,
kf=None,
Fz_norm=None,
mu_norm=None,
k1_norm=None,
k3_norm=None,
nquad=30):
self.BC = BC
self.nterms = nterms
self.E = E
self.I = I
self.rho = rho
self.A = A
self.L = L
self.k1 = k1
self.k3 = k3
self.mu = mu
self.Fz = Fz
self.v = v
self.v_norm = v_norm
self.kf = kf
self.Fz_norm = Fz_norm
self.mu_norm = mu_norm
self.k1_norm = k1_norm
self.k3_norm = k3_norm
self.nquad = nquad
# normalised parameters
if kf is None:
self.kf = 1 / self.L * np.sqrt(self.I / self.A)
if v_norm is None:
self.v_norm = self.v * np.sqrt(self.rho / self.E)
if Fz_norm is None:
self.Fz_norm = self.Fz / (self.E * self.A)
if self.mu_norm is None:
self.mu_norm = self.mu / self.A * np.sqrt(self.L**2 / (self.rho * self.E))
if self.k1_norm is None:
self.k1_norm = self.k1 * self.L**2 / (self.E * self.A)
if self.k3_norm is None:
self.k3_norm = self.k3 * self.L**4 / (self.E* self.A)
# phi, beta, and quadrature points
if self.BC == "SS":
self.phi = self.phiSS
self.beta = np.pi * (np.arange(self.nterms) + 1)
if not self.nquad is None:
self.xj = 0.5 * (1 - np.cos(np.pi * np.arange(self.nquad) / (self.nquad - 1)))
self.BC_coeff = 2
elif self.BC == "CC" or self.BC == "FF":
def _f(beta):
return 1 - np.cos(beta) * np.cosh(beta)
self.beta = find_n_roots(_f, n=self.nterms, x0=0.001, dx=3.14159 / 10, p=1.1)
if not self.nquad is None:
self.xj = 0.5*(1 - np.cos(np.pi * np.arange(self.nquad)/(self.nquad - 3)))
self.xj[0] = 0
self.xj[1] = 0.0001
self.xj[-2] = 0.0001
self.xj[-1] = 0
            self.BC_coeff = 1  # Coefficient to multiply Fz(vt) and k3*Integral terms by
if self.BC == "CC":
self.phi = self.phiCC
if self.BC == "FF":
self.phi = self.phiFF
self.beta[1:] = self.beta[0:-1]
self.beta[0] = 0.0
else:
raise ValueError("only BC='SS', 'CC' and 'FF' have been implemented, you have {}".format(self.BC))
# quadrature weighting
if not self.nquad is None:
rhs = np.reciprocal(np.arange(self.nquad, dtype=float) + 1)
lhs = self.xj[np.newaxis, :] ** np.arange(self.nquad)[:, np.newaxis]
self.Ij = np.linalg.solve(lhs, rhs)
self.vsvdot = np.zeros(2 * self.nterms) #vector of state values for odeint
def phiSS(self, x, beta):
return np.sin(beta * x)
def phiCC(self, x, beta):
def _xi(beta):
return (np.cosh(beta) - np.cos(beta))/(np.sinh(beta) - np.sin(beta))
return (np.cosh(beta * x)
- np.cos(beta * x)
+ _xi(beta) * (np.sin(beta * x) - np.sinh(beta * x))
)
def phiFF(self, x, beta):
def _xi(beta):
return (-(np.cos(beta) - np.cosh(beta))/(np.sin(beta) - np.sinh(beta)))
return (np.cos(beta * x)
+ np.cosh(beta * x)
+ _xi(beta) * (np.sin(beta * x) + np.sinh(beta * x))
)
def w(self, qk, x):
"""Nomalised vertcal deformation at x
Parameters
----------
qk : 1d ndarray of nterm floats
Galerkin coefficients.
x : float
            normalised distance to calculate deflection at.
Returns
-------
w : float
vertical deformation at x value.
"""
return np.sum(qk * self.phi(x, self.beta))
def vectorfield(self, vsv, tnow, p=()):
"""
Parameters
----------
vsv : float
Vector of the state variables.
vsv = [q1, q2, ...qk, q1dot, q2dot, ..., qkdot]
where qk is the kth galerkin coefficient and qkdot is the time
derivative of the kth Galerkin coefficient.
tnow : float
Current time.
p : various
Vector of parameters
Returns
-------
vsvdot : vector of state variables first derivatives
vsvdot = [q1dot, q2dot, ...qkdot, q1dotdot, q2dotdot, ..., qkdotdot]
"""
q = vsv[:self.nterms]
qdot = vsv[self.nterms:]
for i in range(self.nterms):
self.vsvdot[i] = qdot[i]
self.vsvdot[self.nterms + i] = - self.mu_norm * qdot[i]
self.vsvdot[self.nterms + i] -= (self.k1_norm + self.kf**2 * self.beta[i]**4) * q[i]
self.vsvdot[self.nterms + i] += self.BC_coeff * self.Fz_norm * self.phi(self.v_norm * tnow, self.beta[i])
if 1:
# DIY quadrature
Fj = np.sum(q[:,None] * self.phi(self.xj[None, :], self.beta[:,None]), axis=0)**3
y = np.sum(self.Ij * Fj * self.phi(self.xj, self.beta[i]))
else:
# scipy
# print("yquad = {:12.2g}".format(y))
y, err = integrate.quad(self.w_cubed_wi, 0, 1, args=(q,i))
# print("yscip = {:12.2g}".format(y))
#maybe the quad integrate is not great for the oscillating function
self.vsvdot[self.nterms + i] -= self.BC_coeff * self.k3_norm * y
return self.vsvdot
def w_cubed_wi(self, x, q, i):
"""non-linear cube term for numerical integration"""
return self.w(q, x)**3 * self.phi(x, self.beta[i])
def calulate_qk(self, t=None, t_norm=None, **odeint_kwargs):
"""Calculate the nterm Galerkin coefficients qk at each time value
Parameters
----------
t : float or array of float, optional
Raw time values.
t_norm : float or array of float, optional
Normalised time values. If t_norm==None then it will be
calculated from raw t values and other params
t_norm = t / L * sqrt(E / rho).
Notes
-----
        This method initializes self.t and self.t_norm and
calculates `self.qsol`.
"""
if t_norm is None:
self.t_norm = t / self.L * np.sqrt(self.E / self.rho)
else:
self.t_norm = t_norm
self.t = t
vsv0 = np.zeros(2*self.nterms) # initial conditions
self.qsol = odeint(self.vectorfield,
vsv0,
self.t_norm,
args=(),
**odeint_kwargs)
def wofx(self, x=None, x_norm=None, tslice=slice(None, None, None), normalise_w=True):
"""Deflection at distance x, and times t[tslice]
Parameters
----------
x : float or ndarray of float, optional
Raw values of x to calculate deflection at. Default x=None.
x_norm : float or array of float, optional
Normalised x values to calc deflection at. If x_norm==None then
            it will be calculated from `x` and other properties: x_norm = x / L.
Default x_norm=None.
tslice : slice object, optional
slice to select subset of time values. Default
tslice=slice(None, None, None) i.e. all time values.
Note the array of time values is already in the object (it was
used to calc the qk galerkin coefficients).
normalise_w : True/False, optional
            If True then output is normalised deflection. Default normalise_w=True.
Returns
-------
w : array of float
Deflections at x and self.t_norm[tslice]
"""
if x_norm is None:
x_norm = x / self.L
x_norm = np.atleast_1d(x_norm)
v = np.zeros((len(x_norm), len(self.t_norm[tslice])))
for i, xx in enumerate(x_norm):
for j, qq in enumerate(self.qsol[tslice, :self.nterms]):
v[i, j] = self.w(qq, xx)
        if not normalise_w:
v *= self.L
if len(x_norm)==1:
return v[0]
else:
return v
def dingetal_figure_8():
"""Reproduce Ding Et Al 2012 Figure 8 (might take a while).
Note that a plot will be be saved to disk in current working directory
as well as a timing file."""
start_time0 = time.time()
ftime = datetime.datetime.fromtimestamp(start_time0).strftime('%Y-%m-%d %H%M%S')
with open(ftime + ".txt", "w") as f:
f.write(ftime + os.linesep)
        fig = plt.figure()
        ax = fig.add_subplot(111)
ax.set_xlabel("time, s")
ax.set_ylabel("w, m")
ax.set_xlim(3.5, 4.5)
for v in [50, 75, 150, 200]:#[50, 75, 150, 200]
vstr="nterms"
vfmt="{}"
start_time1 = time.time()
f.write("*" * 70 + os.linesep)
vequals = "{}={}".format(vstr, vfmt.format(v))
f.write(vequals + os.linesep); print(vequals)
pdict = OrderedDict(
E = 6.998*1e9, #Pa
rho = 2373, #kg/m3
L = 160, #m
v_norm=0.01165,
kf=5.41e-4,
Fz_norm=1.013e-4,
mu_norm=39.263,
k1_norm=97.552,
k3_norm=2.497e6,
nterms=v,
BC="CC",
nquad=20,)
t = np.linspace(0, 4.5, 400)
f.write(repr(pdict) + os.linesep)
for BC in ["SS"]:# "CC", "FF"]:
pdict["BC"] = BC
a = DingEtAl2012(**pdict)
a.calulate_qk(t=t)
x = t
y = a.wofx(x_norm=0.5, normalise_w=False)
ax.plot(x, y, label="x=0.5, {}".format(vequals))
end_time1 = time.time()
elapsed_time = (end_time1 - start_time1); print("Run time={}".format(str(timedelta(seconds=elapsed_time))))
f.write("Run time={}".format(str(timedelta(seconds=elapsed_time))) + os.linesep)
leg = ax.legend()
leg.draggable()
end_time0 = time.time()
elapsed_time = (end_time0 - start_time0); print("Total run time={}".format(str(timedelta(seconds=elapsed_time))))
f.write("Total run time={}".format(str(timedelta(seconds=elapsed_time))) + os.linesep)
plt.savefig(ftime+".pdf")
plt.show()
if __name__ == "__main__":
pass
# dingetal_figure_8()
|
gpl-3.0
|
rhyolight/nupic.research
|
projects/union_path_integration/plot_convergence.py
|
4
|
11807
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot convergence chart."""
import collections
import json
import os
import matplotlib.pyplot as plt
import numpy as np
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
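# A minimal sketch (not part of the original script) of the aggregation used
# repeatedly in chart() below: each experiment stores a "convergence" mapping
# of {number-of-sensations: object-count}, and the plotted value is the
# count-weighted average number of sensations ('null' handling aside).
def _mean_sensations(convergence):
  total = 0
  count = 0
  for sensations, objects in convergence.items():
    total += int(str(sensations)) * objects
    count += objects
  return float(total) / float(count)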
def chart():
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
# Convergence vs. number of objects, comparing # unique features
#
# Generated with:
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 --numUniqueFeatures 50 --locationModuleWidth 20 --resultName results/convergence_vs_num_objs_50_feats.json
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 --numUniqueFeatures 100 --locationModuleWidth 20 --resultName results/convergence_vs_num_objs_100_feats.json
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 --numUniqueFeatures 5000 --locationModuleWidth 20 --resultName results/convergence_vs_num_objs_5000_feats.json
#plt.style.use("ggplot")
markers = ("s", "o", "^")
for feats, marker in zip((100, 200, 5000), markers):
with open("results/convergence_vs_num_objs_{}_feats.json".format(feats), "r") as f:
convVsObjects = json.load(f)
yData = collections.defaultdict(list)
for exp in convVsObjects:
numObjects = int(str(exp[0]["numObjects"]))
if "null" in exp[1]["convergence"].keys():
continue
results = exp[1]["convergence"].items()
total = 0
count = 0
for i, j in results:
total += (int(str(i)) * j)
count += j
y = float(total) / float(count)
yData[numObjects].append(y)
x = list(sorted(yData.keys()))
yData = sorted(yData.iteritems())
y = [float(sum(pair[1])) / float(len(pair[1]))
if None not in pair[1] else None
for pair in yData]
std = [np.std(pair[1])
for pair in yData]
yBelow = [yi - stdi
for yi, stdi in zip(y, std)]
yAbove = [yi + stdi
for yi, stdi in zip(y, std)]
xError = x[:len(yBelow)]
plt.plot(
x, y, "{}-".format(marker), label="{} unique features".format(feats),
)
#plt.fill_between(xError, yBelow, yAbove, alpha=0.3)
plt.xlabel("Number of Objects")
plt.xticks([(i+1)*200 for i in xrange(10)])
plt.ylabel("Average Number of Sensations")
plt.legend(loc="center right")
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "convergence_vs_objects_w_feats.pdf"))
plt.clf()
# Convergence vs. number of objects, varying module size
# NOT USED in Columns Plus
#
# Generated with:
# TODO
#plt.style.use("ggplot")
#for cpm in (25, 100, 400):
# with open("results/convergence_vs_num_objs_{}_cpm.json".format(cpm), "r") as f:
# convVsObjs = json.load(f)
# yData = collections.defaultdict(list)
# for exp in convVsObjs:
# results = exp[1]["convergence"].items()
# total = 0
# count = 0
# for i, j in results:
# total += (int(str(i)) * j)
# count += j
# y = float(total) / float(count)
# numObjects = int(str(exp[0]["numObjects"]))
# yData[numObjects].append(y)
# x = list(sorted(yData.keys()))
# yData = sorted(yData.iteritems())
# y = [float(sum(pair[1])) / float(len(pair[1])) for pair in yData]
# std = [np.std(pair[1]) for pair in yData]
# yBelow = [yi - stdi for yi, stdi in zip(y, std)]
# yAbove = [yi + stdi for yi, stdi in zip(y, std)]
# plt.plot(
# x, y, "o-", label="{} cells per module".format(cpm),
# )
# plt.fill_between(x, yBelow, yAbove, alpha=0.3)
#plt.xlabel("Number of Objects")
#plt.ylabel("Average Number of Sensations")
#plt.legend(loc="upper left")
#plt.tight_layout()
#plt.savefig(os.path.join(CHART_DIR, "convergence_with_modsize.pdf"))
#plt.clf()
# Convergence vs. number of modules
#
# Generated with:
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 100 --locationModuleWidth 5 --numModules 1 2 3 4 5 6 7 8 9 10 --resultName results/convergence_vs_num_modules_100_feats_25_cpm.json --repeat 10
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 100 --locationModuleWidth 10 --numModules 1 2 3 4 5 6 7 8 9 10 --resultName results/convergence_vs_num_modules_100_feats_100_cpm.json --repeat 10
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 100 --locationModuleWidth 20 --numModules 1 2 3 4 5 6 7 8 9 10 --resultName results/convergence_vs_num_modules_100_feats_400_cpm.json --repeat 10
#plt.style.use("ggplot")
markers = ("s", "o", "^")
for cpm, marker in zip((49, 100, 400), markers):
with open("results/convergence_vs_num_modules_100_feats_{}_cpm.json".format(cpm), "r") as f:
convVsMods100 = json.load(f)
yData = collections.defaultdict(list)
for exp in convVsMods100:
results = exp[1]["convergence"].items()
total = 0
count = 0
for i, j in results:
if str(i) == "null":
total = 50 * j
else:
total += (int(str(i)) * j)
count += j
y = float(total) / float(count)
numModules = int(str(exp[0]["numModules"]))
yData[numModules].append(y)
x = [i+1 for i in xrange(20)]
#y = [float(sum(pair[1])) / float(len(pair[1])) for pair in yData]
y = [float(sum(yData[step])) / float(len(yData[step])) for step in x]
#yData20 = yData[19][1]
#y20 = float(sum(yData20)) / float(len(yData20))
yData = sorted(yData.iteritems())
std = [np.std(pair[1]) for pair in yData]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "{}-".format(marker),
label="{} cells per module".format(cpm),
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# TODO: Update this to ideal?
plt.plot([1, 20], [2.022, 2.022], "r--", label="Ideal")
plt.xlabel("Number of Modules")
plt.ylabel("Average Number of Sensations")
plt.legend(loc="upper right")
plt.ylim((0.0, 7.0))
plt.xticks([(i+1)*2 for i in xrange(10)])
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "convergence_vs_modules_100_feats.pdf"))
plt.clf()
# Cumulative convergence
#
# Generated with:
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 10 --locationModuleWidth 20 --thresholds 18 --resultName results/cumulative_convergence_400_cpm_10_feats_100_objs.json --repeat 10
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 10 --locationModuleWidth 10 --thresholds 19 --resultName results/cumulative_convergence_100_cpm_10_feats_100_objs.json --repeat 10
# python ideal_sim.py
# python bof_sim.py
numSteps = 12
# 1600 CPM
yData = collections.defaultdict(list)
with open("results/cumulative_convergence_1600_cpm_10_feats_100_objs.json", "r") as f:
experiments = json.load(f)
for exp in experiments:
cum = 0
for i in xrange(40):
step = i + 1
count = exp[1]["convergence"].get(str(step), 0)
yData[step].append(count)
x = [i+1 for i in xrange(numSteps)]
y = []
tot = float(sum([sum(counts) for counts in yData.values()]))
cum = 0.0
for step in x:
counts = yData[step]
cum += float(sum(counts))
y.append(100.0 * cum / tot)
std = [np.std(yData[step]) for step in x]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "s-", label="1600 Cells Per Module",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# 400 CPM
yData = collections.defaultdict(list)
with open("results/cumulative_convergence_400_cpm_10_feats_100_objs.json", "r") as f:
experiments = json.load(f)
for exp in experiments:
cum = 0
for i in xrange(40):
step = i + 1
count = exp[1]["convergence"].get(str(step), 0)
yData[step].append(count)
x = [i+1 for i in xrange(numSteps)]
y = []
tot = float(sum([sum(counts) for counts in yData.values()]))
cum = 0.0
for step in x:
counts = yData[step]
cum += float(sum(counts))
y.append(100.0 * cum / tot)
std = [np.std(yData[step]) for step in x]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "o-", label="400 Cells Per Module",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
## 289 CPM
yData = collections.defaultdict(list)
with open("results/cumulative_convergence_289_cpm_10_feats_100_objs_1.json", "r") as f:
experiments = json.load(f)
for exp in experiments:
cum = 0
for i in xrange(40):
step = i + 1
count = exp[1]["convergence"].get(str(step), 0)
yData[step].append(count)
x = [i+1 for i in xrange(numSteps)]
y = []
tot = float(sum([sum(counts) for counts in yData.values()]))
cum = 0.0
for step in x:
counts = yData[step]
cum += float(sum(counts))
y.append(100.0 * cum / tot)
std = [np.std(yData[step]) for step in x]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "^-", label="289 Cells Per Module",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# Ideal
with open("results/ideal.json", "r") as f:
idealResults = json.load(f)
x = [i+1 for i in xrange(numSteps)]
y = []
std = [np.std(idealResults.get(str(steps), [0])) for steps in x]
tot = float(sum([sum(counts) for counts in idealResults.values()]))
cum = 0.0
for steps in x:
counts = idealResults.get(str(steps), [])
if len(counts) > 0:
cum += float(sum(counts))
y.append(100.0 * cum / tot)
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "x--", label="Ideal Observer",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# BOF
with open("results/bof.json", "r") as f:
bofResults = json.load(f)
x = [i+1 for i in xrange(numSteps)]
y = []
std = [np.std(bofResults.get(str(steps), [0])) for steps in x]
tot = float(sum([sum(counts) for counts in bofResults.values()]))
cum = 0.0
for steps in x:
counts = bofResults.get(str(steps), [])
if len(counts) > 0:
cum += float(sum(counts))
y.append(100.0 * cum / tot)
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "d--", label="Bag of Features",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# Formatting
plt.xlabel("Number of Sensations")
plt.ylabel("Cumulative Accuracy")
plt.legend(loc="center right")
plt.xticks([(i+1)*2 for i in xrange(6)])
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "cumulative_accuracy.pdf"))
plt.clf()
if __name__ == "__main__":
chart()
|
gpl-3.0
|
rsivapr/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
5
|
7727
|
"""
Test the fastica algorithm.
"""
import warnings
import itertools
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
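# A small illustrative helper (not part of the original test suite) showing
# the in-place behaviour the tests below rely on:
def _center_and_norm_example():
    s = np.vstack([np.linspace(0., 1., 100), np.linspace(-1., 1., 100)])
    center_and_norm(s)
    # each row of s now has (approximately) zero mean and unit std deviation
    return s.mean(axis=1), s.std(axis=1)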
def test_gs():
"""
Test gram schmidt orthonormalization
"""
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
ica = FastICA(whiten=False, random_state=0)
ica.fit(m)
ica.mixing_
# test for issue #697
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
ica = FastICA(n_components=1, whiten=False, random_state=0)
ica.fit(m) # should raise warning
assert_true(len(w) == 1) # 1 warning should be raised
def test_non_square_fastica(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
"""Test FastICA.fit_transform"""
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, 10]]:
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components, 10))
assert_equal(Xt.shape, (100, n_components))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
"""Test FastICA.inverse_transform"""
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
bsd-3-clause
|
UNR-AERIAL/scikit-learn
|
sklearn/datasets/tests/test_mldata.py
|
384
|
5221
|
"""Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
|
bsd-3-clause
|
carrillo/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
244
|
7588
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations to make the formula easier to read
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
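# The exact additive chi2 kernel from the start of the test above, packaged
# as a standalone helper (illustrative only, not used by the tests):
# k(a, b) = sum_i 2 * a_i * b_i / (a_i + b_i), summed over features.
def _exact_additive_chi2(A, B):
    A_ = A[:, np.newaxis, :]
    B_ = B[np.newaxis, :, :]
    return (2 * A_ * B_ / (A_ + B_)).sum(axis=2)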
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations to make the formula easier to read
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array has shape (n_samples_x, n_samples_y, n_features)
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
bsd-3-clause
|
dseredyn/velma_scripts
|
scripts/symbolic_planner3.py
|
1
|
26371
|
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import roslib
roslib.load_manifest('velma_scripts')
import rospy
import tf
from std_msgs.msg import *
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from barrett_hand_controller_msgs.msg import *
from cartesian_trajectory_msgs.msg import *
from visualization_msgs.msg import *
import actionlib
from actionlib_msgs.msg import *
from threading import Lock
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import force_control_msgs.msg
import PyKDL
import math
import numpy as np
import copy
import matplotlib.pyplot as plt
import thread
import random
import itertools
import operator
import rospkg
from scipy import optimize
import re
def opened(ws, ob):
if ob[0] == 'Door':
assert "state" in ob[1]
return ob[1]["state"] == "opened"
raise TypeError("wrong type in predicate opened: " + ob[0])
def reachable(ws, o):
if o[0] == 'Object':
assert "pose" in o[1]
str_list = o[1]["pose"].split()
if str_list[0] == "inside":
cont_name = str_list[1]
cont = ws.getObject(cont_name)
door1_name = cont[1]["door1"]
door2_name = cont[1]["door2"]
if opened(ws, ws.getObject(door1_name)) and opened(ws, ws.getObject(door2_name)):
return True
else:
return False
else:
return True
raise TypeError("wrong types in predicate reachable: " + o[0] + " " + c[0])
def inside(ws, o1, o2):
if o1[0] == 'Object' and o2[0] == 'Container':
assert "pose" in o1[1]
str_list = o1[1]["pose"].split()
if str_list[0] == "inside":
cont_name = str_list[1]
if cont_name == o2[1]["name"]:
return True
else:
return False
else:
return False
raise TypeError("wrong types in predicate inside: " + o1[0] + " " + o2[0])
def part_of(ws, d, o):
if d[0] == 'Door' and o[0] == 'Container':
if o[1]["door1"] == d[1]["name"] or o[1]["door2"] == d[1]["name"]:
return True
else:
return False
raise TypeError("wrong types in predicate part_of: " + d[0] + " " + o[0])
def free(ws, ob):
if ob[0] == 'Manipulator':
assert "grasped_name" in ob[1]
return ob[1]["grasped_name"] == None
raise TypeError("wrong type in predicate free: " + ob[0])
def grasped(ws, ob1, ob2):
if ob1[0] == 'Manipulator' and ob2[0] == 'Door':
assert "grasped_name" in ob1[1]
assert "name" in ob2[1]
return ob1[1]["grasped_name"] == ob2[1]["name"]
if ob1[0] == 'Manipulator' and ob2[0] == 'Object':
assert "grasped_name" in ob1[1]
assert "name" in ob2[1]
return ob1[1]["grasped_name"] == ob2[1]["name"]
raise TypeError("wrong types in predicate grasped: " + ob1[0] + " " + ob2[0])
def conf_feasible(ws, ob):
if ob[0] == 'Manipulator':
assert "conf_feasible" in ob[1]
return ob[1]["conf_feasible"]
raise TypeError("wrong type in predicate conf_feasible: " + ob[0])
def ajar(ws, ob):
if ob[0] == 'Door':
assert "state" in ob[1]
return ob[1]["state"] == "ajar"
raise TypeError("wrong type in predicate ajar: " + ob[0])
def closed(ws, ob):
if ob[0] == 'Door':
assert "state" in ob[1]
return ob[1]["state"] == "closed"
raise TypeError("wrong type in predicate closed: " + ob[0])
def pose_certain(ws, ob):
if ws.typeMatch(ob[0], 'VerticalPlane'):
assert "pose_certain" in ob[1]
return ob[1]["pose_certain"]
raise TypeError("wrong type in predicate pose_certain: " + ob[0])
# arguments: expr_str, parameters
# returned value:
# dictionary: pred_str:(pred_name, [arg1_name, arg2_name,...], [arg1_type, arg2_type,...]):
def extractPredicatesAbst(expr_str, parameters):
assert parameters != None
result = {}
e = 0
while True:
s = expr_str.find("[", e)
if s < 0:
break
e = expr_str.find("]", s)+1
if expr_str[s+1:e-1] in result:
continue
exp = expr_str[s+1:e-1].split()
pred_name = exp[0]
arg_name = []
arg_type = []
for idx in range(1, len(exp)):
assert exp[idx][0] == "?"
arg_name.append(exp[idx])
arg_type.append( parameters[exp[idx]] )
result[expr_str[s+1:e-1]] = (pred_name, arg_name, arg_type)
return result
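# Example (illustrative): for expr_str = "[grasped ?m ?d] and [free ?m]" and
# parameters = {"?m": "Manipulator", "?d": "Door"} the function above returns
#   {"grasped ?m ?d": ("grasped", ["?m", "?d"], ["Manipulator", "Door"]),
#    "free ?m":       ("free",    ["?m"],       ["Manipulator"])}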
# arguments: expr_str, parameters
# returned value:
# dictionary: pred_str:(pred_name, [obj1_name, obj2_name,...], [arg1_name, arg2_name,...], [arg1_type, arg2_type,...]):
def extractPredicates(expr_str, parameters, obj_types_map):
result = {}
e = 0
while True:
s = expr_str.find("[", e)
if s < 0:
break
e = expr_str.find("]", s)+1
if expr_str[s+1:e-1] in result:
continue
exp = expr_str[s+1:e-1].split()
pred_name = exp[0]
obj_names = []
arg_names = []
arg_types = []
for idx in range(1, len(exp)):
if parameters != None and exp[idx] in parameters:
assert exp[idx][0] == "?"
obj_names.append( None )
arg_names.append( exp[idx] )
arg_types.append( parameters[exp[idx]] )
else:
assert exp[idx][0] != "?"
obj_names.append( exp[idx] )
arg_names.append( None )
if obj_types_map != None:
assert exp[idx] in obj_types_map
arg_types.append( obj_types_map[exp[idx]] )
result[expr_str[s+1:e-1]] = (pred_name, obj_names, arg_names, arg_types)
return result
def getAllPossibilities(goal_exp, parameters, obj_types_map):
    # get all combinations of predicate truth values that achieve the goal expression
goal_pred = extractPredicates(goal_exp, parameters, obj_types_map)
goal_product = itertools.product([True, False], repeat = len(goal_pred))
goal_cases = []
for subset in goal_product:
goal_str = goal_exp
i = 0
case = {}
for pred in goal_pred:
goal_str = goal_str.replace("["+pred+"]", str(subset[i]))
case[pred] = (goal_pred[pred], subset[i])
i += 1
if eval(goal_str):
goal_cases.append(case)
return goal_cases
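# Example (illustrative): for goal_exp = "[opened d1] or [closed d1]" the
# truth assignments (True, True), (True, False) and (False, True) make the
# expression evaluate to True, so three cases are returned, each mapping the
# predicate string to (predicate-info tuple, required truth value).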
def substitute(exp_str, subst_map):
result = exp_str
for s in subst_map:
result = result.replace(s, subst_map[s])
return result
def generateSubstitutionCases(obj_type_map, parameters, substitutions):
# get other substitutions
subst2 = {}
subst2_inv = {}
type_pool = {}
for var in parameters:
if not var in substitutions:
var_type = parameters[var]
subst2[var] = var_type
if not var_type in subst2_inv:
subst2_inv[var_type] = [var]
else:
subst2_inv[var_type].append(var)
if not var_type in type_pool:
objs = obj_type_map[var_type]
type_pool[var_type] = []
for obj in objs:
obj_used = False
for vv in substitutions:
if obj == substitutions[vv]:
obj_used = True
if not obj_used:
type_pool[var_type].append(obj)
else:
assert False # ok
ll = []
ll_types = []
# generate cases for substitution
subst_cases = []
for var_type in subst2_inv:
list_a = []
x = itertools.combinations( type_pool[var_type], len(subst2_inv[var_type]) )
for elem in x:
p = itertools.permutations(elem)
for e in p:
list_a.append(e)
ll.append( list_a )
ll_types.append(var_type)
prod = itertools.product(*ll)
for e in prod:
s_case = {}
for ti in range(len(ll_types)):
type_name = ll_types[ti]
for vi in range(len(e[ti])):
subst_var = subst2_inv[type_name][vi]
subst_dest = e[ti][vi]
s_case[subst_var] = subst_dest
subst_cases.append(s_case)
return subst_cases
class WorldState:
def __init__(self):
self.objects_t_n_map = {}
self.objects_n_t_map = {}
self.objects_n_map = {}
self.types_t_b_map = {}
self.types_t_attr_map = {}
predicates = [free, grasped, conf_feasible, opened, ajar, closed, pose_certain, inside, reachable, part_of]
self.predicates_map = {}
for pred in predicates:
self.predicates_map[pred.__name__] = pred
def addType(self, type_name, base_types, attributes):
assert not type_name in self.types_t_b_map
assert not type_name in self.types_t_attr_map
self.types_t_b_map[type_name] = base_types
self.types_t_attr_map[type_name] = attributes
def getPred(self, name, args):
arg_names = args.split()
arg_list = []
for arg in arg_names:
arg_list.append( self.objects_n_map[arg] )
return self.predicates_map[name](self, *arg_list)
def simulateAction(self, a, args):
arg_names = args.split()
arg_list = []
for arg in arg_names:
arg_list.append( self.objects_n_map[arg] )
return a.actionSim(*arg_list)
def addObject(self, obj_type, obj_name, obj_parameters):
assert obj_type in self.types_t_b_map
assert not obj_name in self.objects_n_map
assert not "name" in obj_parameters
if not obj_type in self.objects_t_n_map:
self.objects_t_n_map[obj_type] = []
self.objects_t_n_map[obj_type].append( obj_name )
for attr in self.types_t_attr_map[obj_type]:
if not attr in obj_parameters:
raise TypeError("missing attribute " + attr + " in object " + obj_name + " of class " + obj_type)
for tb in self.types_t_b_map[obj_type]:
for attr in self.types_t_attr_map[tb]:
if not attr in obj_parameters:
raise TypeError("missing attribute " + attr + " in object " + obj_name + " of class " + tb)
for tb in self.types_t_b_map[obj_type]:
if not tb in self.objects_t_n_map:
self.objects_t_n_map[tb] = []
self.objects_t_n_map[tb].append( obj_name )
obj_parameters["name"] = obj_name
self.objects_n_map[obj_name] = (obj_type, obj_parameters)
self.objects_n_t_map[obj_name] = obj_type
def getObject(self, obj_name):
assert obj_name in self.objects_n_map
return self.objects_n_map[obj_name]
def typeMatch(self, type_name, base_type):
assert type_name in self.types_t_b_map
return (base_type == type_name) or (base_type in self.types_t_b_map[type_name])
def getEffect(self, action, pred_name, pred_objs, pred_types, pred_value):
for pred_str in action.effect_map:
substitutions = {}
if action.effect_map[pred_str][0] == pred_name:
match = True
for arg_i in range(len(pred_types)):
if not self.typeMatch(pred_types[arg_i], action.effect_map[pred_str][2][arg_i]):
match = False
break
substitutions[ action.effect_map[pred_str][1][arg_i] ] = pred_objs[arg_i]
if match and pred_value:
return substitutions
return None
class Action:
def __init__(self, name, parameters, precondition, effect, rt_failure, action_sim):
self.name = name
par = parameters.split(",")
self.parameters = {}
self.param_list = []
for p in par:
decl = p.split()
self.parameters[decl[0]] = decl[1]
self.param_list.append( decl[0] )
self.precondition = precondition
self.rt_failure = rt_failure
self.effect_map = extractPredicatesAbst(effect, self.parameters)
self.action_sim = action_sim
def actionSim(self, *args):
if self.action_sim != None:
return self.action_sim(*args)
return None
class Scenario:
def __init__(self, init_state, goal, actions):
self.init_state = copy.deepcopy(init_state)
self.goal = copy.copy(goal)
self.actions = actions
self.action_map = {}
for a in self.actions:
self.action_map[a.name] = a
self.steps = [
(None, self.init_state, None, None),
(self.goal, None, None, None) ]
def processGoal(self, goal_pred_list, world_state):
result_steps = []
indent_str = " "
# step_added = False
for pred in goal_pred_list:
predicate = goal_pred_list[pred][0]
pred_value = goal_pred_list[pred][1]
pred_name = predicate[0]
pred_objs = predicate[1]
pred_types = predicate[3]
all_inst = True
pred_args = ""
for obj_name in pred_objs:
if obj_name == None:
all_inst = False
break
pred_args += obj_name + " "
assert all_inst
curr_value = world_state.getPred(pred_name, pred_args)
print indent_str + " ", pred_value, "==", pred_name, pred_objs, pred_types, " (current value: ", curr_value, ")"
# the predicate is not yet satisfied
if curr_value != pred_value:
print indent_str + " ", "not satisfied"
solution_found = False
for a in self.actions:
# get certain substitutions for a given action
substitutions = world_state.getEffect(a, pred_name, pred_objs, pred_types, pred_value)
if substitutions != None:
action_found = True
print indent_str + " ", a.name, substitutions
# print world_state.objects_t_n_map
subst_cases = generateSubstitutionCases(world_state.objects_t_n_map, a.parameters, substitutions)
# fork here: substitutions
sc = subst_cases[0]
sc_all = sc.copy()
sc_all.update(substitutions)
new_state = copy.deepcopy(world_state)
action_args = ""
for par_name in a.param_list:
obj_name = sc_all[par_name]
action_args += obj_name + " "
new_state.simulateAction(a, action_args)
precond = substitute(a.precondition, sc_all)
print "step added", a.name, precond
# self.steps.insert( s_idx+1, (precond, new_state, a.name, sc_all) )
result_steps.append( (precond, new_state, a.name, sc_all) )
# step_added = True
# break
# if step_added:
# break
return result_steps #step_added
def process(self):
indent_str = " "
all_c = 1
while True:
print "*"
print "*"
print "*"
for s_idx in range(0, len(self.steps)-1):
prev_step = self.steps[s_idx]
next_step = self.steps[s_idx+1]
goal = next_step[0]
world_state = prev_step[1]
print "iterating steps:"
print "getAllPossibilities", goal
posi = getAllPossibilities(goal, None, world_state.objects_n_t_map)
# fork here: goal variants
p = posi[0]
all_c *= len(posi)
# step_added = False
new_steps = self.processGoal(posi[0], world_state)
if len(new_steps) > 0:
print "************* possible substitutions:"
for st in new_steps:
print st[2], st[3]
print "*************"
all_c *= len(new_steps)
step_added = True
self.steps.insert( s_idx+1, new_steps[0] )
else:
step_added = False
if step_added:
break
if not step_added:
break
# post process all steps - propagate world changes
for s_idx in range(0, len(self.steps)-1):
prev_step = self.steps[s_idx]
next_step = self.steps[s_idx+1]
world_state = prev_step[1]
action_name = next_step[2]
sc_all = next_step[3]
if action_name != None:
a = self.action_map[action_name]
new_state = copy.deepcopy(world_state)
action_args = ""
for par_name in a.param_list:
obj_name = sc_all[par_name]
action_args += obj_name + " "
new_state.simulateAction(a, action_args)
print "world state: " + a.name, sc_all
self.steps[s_idx+1] = (self.steps[s_idx+1][0], new_state, self.steps[s_idx+1][2], self.steps[s_idx+1][3])
if rospy.is_shutdown():
break
print "steps:"
for s in self.steps:
print s[2], s[3]
print "all_c", all_c
return
class SymbolicPlanner:
"""
class for SymbolicPlanner
"""
def __init__(self, pub_marker=None):
pass
def spin(self):
ws = WorldState()
ws.addType("VerticalPlane", [], ["pose_certain"])
ws.addType("Container", [], ["door1", "door2"])
ws.addType("Door", ["VerticalPlane"], ["state", "parent"])
ws.addType("Manipulator", [], ["grasped_name", "conf_feasible"])
ws.addType("Object", [], ["pose"])
ws.addObject("Container", "cab", {"door1":"door_cab_l", "door2":"door_cab_r"})
ws.addObject("Door", "door_cab_l", {"state":"closed", "parent":"cab", "pose_certain":False})
ws.addObject("Door", "door_cab_r", {"state":"closed", "parent":"cab", "pose_certain":False})
ws.addObject("Manipulator", "man_l", {"grasped_name":None, "conf_feasible":True})
ws.addObject("Manipulator", "man_r", {"grasped_name":None, "conf_feasible":True})
ws.addObject("Object", "jar", {"pose":"inside cab"})
# self.goal = "([opened ?door_cab_r] or (not [opened ?door_cab_r])) and [closed ?door_cab_l]"
# self.goal = "[opened door_cab_r] and [free man_l]"
# self.goal = "[opened door_cab_r] and [free man_l] and [grasped man_r jar]"
# self.goal = "[grasped man_r jar]"
self.goal = "[closed door_cab_r] and [closed door_cab_l] and [free man_l] and [grasped man_r jar]"
# unit tests
assert ws.getPred("free", "man_l") == True
assert ws.getPred("grasped", "man_l door_cab_l") == False
assert ws.getPred("conf_feasible", "man_l") == True
def a_explore_sim(d, m):
assert d != None
assert "pose_certain" in d[1]
assert d[1]["pose_certain"] == False
d[1]["pose_certain"] = True
a_explore = Action("explore",
"?vp VerticalPlane,?m Manipulator",
"[free ?m] and (not [pose_certain ?vp])",
"[pose_certain ?vp]",
"",
a_explore_sim)
def a_grasp_door_sim(d, m):
assert d != None
assert m != None
assert "grasped_name" in m[1]
# assert m[1]["grasped_name"] == None
assert "name" in d[1]
m[1]["grasped_name"] = d[1]["name"]
a_grasp_door = Action("grasp_door",
"?d Door,?m Manipulator",
"[free ?m] and [pose_certain ?d]",
"[grasped ?m ?d]",
"",
a_grasp_door_sim)
def a_ungrasp_door_sim(d, m):
            assert m != None
            assert "grasped_name" in m[1]
            m[1]["grasped_name"] = None
a_ungrasp_door = Action("ungrasp_door",
"?d Door,?m Manipulator",
"[grasped ?m ?d]",
"[free ?m]",
"",
a_ungrasp_door_sim)
def a_ungrasp_sim(m):
assert m != None
assert "grasped_name" in m[1]
assert m[1]["grasped_name"] != None
m[1]["grasped_name"] = None
a_ungrasp = Action("ungrasp",
"?m Manipulator",
"not [free ?m]",
"[free ?m]",
"",
a_ungrasp_sim)
def a_open_door_sim(d, m):
assert d != None
assert "state" in d[1]
d[1]["state"] = "opened"
a_open_door = Action("open_door",
"?d Door,?m Manipulator",
"[grasped ?m ?d] and [conf_feasible ?m] and ([closed ?d] or [ajar ?d])",
"[opened ?d]",
"[grasped ?m ?d] and (not [conf_feasible ?m]) and ([closed ?d] or [ajar ?d])",
a_open_door_sim)
def a_close_door_sim(d, m):
assert d != None
assert "state" in d[1]
d[1]["state"] = "closed"
a_close_door = Action("close_door",
"?d Door,?m Manipulator",
"[grasped ?m ?d] and [conf_feasible ?m] and ([opened ?d] or [ajar ?d])",
"[closed ?d]",
"",
a_close_door_sim)
def a_grasp_object_sim(o, m):
assert o != None
assert m != None
assert "grasped_name" in m[1]
assert m[1]["grasped_name"] == None
assert "name" in o[1]
m[1]["grasped_name"] = o[1]["name"]
a_grasp_object = Action("grasp_object",
"?o Object,?m Manipulator",
"[free ?m] and [reachable ?o]",
"[grasped ?m ?o]",
"",
a_grasp_object_sim)
def a_uncover_sim(o1, o2, m):
assert o1 != None
assert o2 != None
assert m != None
            # no state change is simulated here; the uncover action below is
            # created with action_sim=None, so this helper is never invoked
a_uncover = Action("uncover",
"?o Object,?c Container,?d1 Door,?d2 Door",
"(not [reachable ?o]) and [inside ?o ?c] and [part_of ?d1 ?c] and [part_of ?d2 ?c] and [opened ?d1] and [opened ?d2]",
"[reachable ?o]",
"",
None)
self.actions = [a_explore, a_grasp_door, a_ungrasp, a_open_door, a_close_door, a_grasp_object, a_uncover]
s = Scenario(ws, self.goal, self.actions)
s.process()
return
if __name__ == '__main__':
rospy.init_node('symbolic_planner')
task = SymbolicPlanner()
task.spin()
|
bsd-3-clause
|
xubenben/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
267
|
6813
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
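# A minimal usage sketch of the estimator exercised above (illustrative
# helper, not part of the original test module):
def _gp_quick_example():
    gp = GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_state=0).fit(X, y)
    y_pred, mse = gp.predict(X2, eval_MSE=True)
    return y_pred, mse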
|
bsd-3-clause
|
moberweger/deep-prior-pp
|
src/main_icvl_posereg_embedding.py
|
1
|
8844
|
"""This is the main file for training hand joint classifier on ICVL dataset
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <[email protected]>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import matplotlib
matplotlib.use('Agg') # plot to file
import matplotlib.pyplot as plt
import os
import cPickle
from sklearn.decomposition import PCA
from trainer.poseregnettrainer import PoseRegNetTrainer, PoseRegNetTrainerParams
from net.poseregnet import PoseRegNetParams, PoseRegNet
from data.importers import ICVLImporter
from data.dataset import ICVLDataset
from util.handdetector import HandDetector
from util.handpose_evaluation import ICVLHandposeEvaluation
from data.transformations import transformPoints2D
from net.hiddenlayer import HiddenLayer, HiddenLayerParams
if __name__ == '__main__':
eval_prefix = 'ICVL_EMB_t0nF8mp421fD553h1024_PCA30_AUGMENT'
if not os.path.exists('./eval/'+eval_prefix+'/'):
os.makedirs('./eval/'+eval_prefix+'/')
rng = numpy.random.RandomState(23455)
print("create data")
aug_modes = ['com', 'rot', 'none'] # 'sc',
comref = None # "./eval/ICVL_COM_AUGMENT/net_ICVL_COM_AUGMENT.pkl"
docom = False
di = ICVLImporter('../data/ICVL/', refineNet=comref)
Seq1 = di.loadSequence('train', ['0'], shuffle=True, rng=rng, docom=docom)
trainSeqs = [Seq1]
Seq2 = di.loadSequence('test_seq_1', docom=docom)
testSeqs = [Seq2]
# create training data
trainDataSet = ICVLDataset(trainSeqs)
train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
train_data_cube = numpy.asarray([Seq1.config['cube']]*train_data.shape[0], dtype='float32')
train_data_com = numpy.asarray([d.com for d in Seq1.data], dtype='float32')
train_data_M = numpy.asarray([d.T for d in Seq1.data], dtype='float32')
train_gt3Dcrop = numpy.asarray([d.gt3Dcrop for d in Seq1.data], dtype='float32')
mb = (train_data.nbytes) / (1024 * 1024)
print("data size: {}Mb".format(mb))
valDataSet = ICVLDataset(testSeqs)
val_data, val_gt3D = valDataSet.imgStackDepthOnly('test_seq_1')
testDataSet = ICVLDataset(testSeqs)
test_data, test_gt3D = testDataSet.imgStackDepthOnly('test_seq_1')
print train_gt3D.max(), test_gt3D.max(), train_gt3D.min(), test_gt3D.min()
print train_data.max(), test_data.max(), train_data.min(), test_data.min()
imgSizeW = train_data.shape[3]
imgSizeH = train_data.shape[2]
nChannels = train_data.shape[1]
####################################
# convert data to embedding
pca = PCA(n_components=30)
pca.fit(HandDetector.sampleRandomPoses(di, rng, train_gt3Dcrop, train_data_com, train_data_cube, 1e6,
aug_modes).reshape((-1, train_gt3D.shape[1]*3)))
train_gt3D_embed = pca.transform(train_gt3D.reshape((train_gt3D.shape[0], train_gt3D.shape[1]*3)))
test_gt3D_embed = pca.transform(test_gt3D.reshape((test_gt3D.shape[0], test_gt3D.shape[1]*3)))
val_gt3D_embed = pca.transform(val_gt3D.reshape((val_gt3D.shape[0], val_gt3D.shape[1]*3)))
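    # Sanity check (added sketch): pca.components_ and pca.mean_ are reused
    # further down as the fixed weights of the linear "prior" layer, so the
    # 30-dimensional embedding should be approximately invertible.
    train_gt3D_recon = pca.inverse_transform(train_gt3D_embed).reshape(train_gt3D.shape)
    print("PCA reconstruction error (max abs): {}".format(numpy.abs(train_gt3D_recon - train_gt3D).max()))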
############################################################################
print("create network")
batchSize = 128
poseNetParams = PoseRegNetParams(type=0, nChan=nChannels, wIn=imgSizeW, hIn=imgSizeH, batchSize=batchSize,
numJoints=1, nDims=train_gt3D_embed.shape[1])
poseNet = PoseRegNet(rng, cfgParams=poseNetParams)
poseNetTrainerParams = PoseRegNetTrainerParams()
poseNetTrainerParams.batch_size = batchSize
poseNetTrainerParams.learning_rate = 0.001
poseNetTrainerParams.weightreg_factor = 0.0
poseNetTrainerParams.force_macrobatch_reload = True
poseNetTrainerParams.para_augment = True
poseNetTrainerParams.augment_fun_params = {'fun': 'augment_poses', 'args': {'normZeroOne': False,
'di': di,
'aug_modes': aug_modes,
'hd': HandDetector(train_data[0, 0].copy(), abs(di.fx), abs(di.fy), importer=di),
'proj': pca}}
print("setup trainer")
poseNetTrainer = PoseRegNetTrainer(poseNet, poseNetTrainerParams, rng, './eval/'+eval_prefix)
poseNetTrainer.setData(train_data, train_gt3D_embed, val_data, val_gt3D_embed)
poseNetTrainer.addStaticData({'val_data_y3D': val_gt3D})
poseNetTrainer.addStaticData({'pca_data': pca.components_, 'mean_data': pca.mean_})
poseNetTrainer.addManagedData({'train_data_cube': train_data_cube,
'train_data_com': train_data_com,
'train_data_M': train_data_M,
'train_gt3Dcrop': train_gt3Dcrop})
poseNetTrainer.compileFunctions(compileDebugFcts=False)
###################################################################
# TRAIN
train_res = poseNetTrainer.train(n_epochs=100)
train_costs = train_res[0]
val_errs = train_res[2]
###################################################################
# TEST
# plot cost
fig = plt.figure()
plt.semilogy(train_costs)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_cost.png')
fig = plt.figure()
plt.plot(numpy.asarray(val_errs).T)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_errs.png')
# save results
poseNet.save("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))
# poseNet.load("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))
# add prior to network
cfg = HiddenLayerParams(inputDim=(batchSize, train_gt3D_embed.shape[1]),
outputDim=(batchSize, numpy.prod(train_gt3D.shape[1:])), activation=None)
pcalayer = HiddenLayer(rng, poseNet.layers[-1].output, cfg, layerNum=len(poseNet.layers))
pcalayer.W.set_value(pca.components_)
pcalayer.b.set_value(pca.mean_)
poseNet.layers.append(pcalayer)
poseNet.output = pcalayer.output
poseNet.cfgParams.numJoints = train_gt3D.shape[1]
poseNet.cfgParams.nDims = train_gt3D.shape[2]
poseNet.cfgParams.outputDim = pcalayer.cfgParams.outputDim
poseNet.save("./eval/{}/network_prior.pkl".format(eval_prefix))
###################################################################
# test
print("Testing ...")
gt3D = [j.gt3Dorig for j in testSeqs[0].data]
jts_embed = poseNet.computeOutput(test_data)
jts = jts_embed
joints = []
for i in xrange(test_data.shape[0]):
joints.append(jts[i].reshape((-1, 3))*(testSeqs[0].config['cube'][2]/2.) + testSeqs[0].data[i].com)
joints = numpy.array(joints)
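    # (added) The appended PCA layer maps the 30-dim embedding back to normalized
    # 3D joint offsets (roughly in [-1, 1]); scaling by half the crop cube size and
    # adding the per-frame center of mass converts them to world coordinates in mm.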
hpe = ICVLHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/'+eval_prefix+'/'
print("Train samples: {}, test samples: {}".format(train_data.shape[0], len(gt3D)))
print("Mean error: {}mm, max error: {}mm".format(hpe.getMeanError(), hpe.getMaxError()))
print("{}".format([hpe.getJointMeanError(j) for j in range(joints[0].shape[0])]))
print("{}".format([hpe.getJointMaxError(j) for j in range(joints[0].shape[0])]))
# save results
cPickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix, os.path.split(__file__)[1], eval_prefix), "wb"), protocol=cPickle.HIGHEST_PROTOCOL)
print "Testing baseline"
#################################
# BASELINE
# Load the evaluation
data_baseline = di.loadBaseline('../data/ICVL/LRF_Results_seq_1.txt')
hpe_base = ICVLHandposeEvaluation(gt3D, data_baseline)
hpe_base.subfolder += '/'+eval_prefix+'/'
print("Mean error: {}mm".format(hpe_base.getMeanError()))
hpe.plotEvaluation(eval_prefix, methodName='Our regr', baseline=[('Tang et al.', hpe_base)])
ind = 0
for i in testSeqs[0].data:
if ind % 20 != 0:
ind += 1
continue
jtI = transformPoints2D(di.joints3DToImg(joints[ind]), i.T)
hpe.plotResult(i.dpt, i.gtcrop, jtI, "{}_{}".format(eval_prefix, ind))
ind += 1
|
gpl-3.0
|
mabuchilab/QNET
|
setup.py
|
1
|
2468
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import os
from setuptools import setup, find_packages
def get_version(filename):
"""Extract the package version"""
with open(filename) as in_fh:
for line in in_fh:
if line.startswith('__version__'):
return line.split('=')[1].strip()[1:-1]
raise ValueError("Cannot extract version from %s" % filename)
with open('README.rst') as readme_file:
readme = readme_file.read()
try:
with open('HISTORY.rst') as history_file:
history = history_file.read()
except OSError:
history = ''
requirements = ['sympy<1.2', 'scipy', 'numpy', 'attrs', 'uniseg']
dev_requirements = [
'coverage', 'coveralls', 'pytest', 'pytest-cov', 'pytest-xdist', 'twine', 'pep8',
'flake8', 'wheel', 'sphinx', 'sphinx-autobuild', 'sphinx_rtd_theme',
'sphinx-autodoc-typehints', 'ipython']
dev_requirements.append('better-apidoc>=0.2.0')
version = get_version(os.path.join('.', 'src', 'qnet', '__init__.py'))
setup(
name='QNET',
version=version,
description="Computer algebra package for quantum mechanics and "
"photonic quantum networks",
author="Nikolas Tezak, Michael Goerz",
author_email='[email protected]',
url='https://github.com/mabuchilab/QNET',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Mathematics',
],
install_requires=requirements,
extras_require={
'dev': dev_requirements,
'simulation': ['qutip>=3.0.1'],
'visualization': ['matplotlib', 'pyx>0.14'],
},
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords=[
'qnet', 'computer algebra', 'symbolic algebra', 'science',
'quantum computing', 'quantum mechanics', 'quantum optics',
'quantum networks', 'circuits', 'SLH', 'qutip', 'sympy'],
packages=find_packages(where="src"),
package_dir={"": "src"},
zip_safe=False,
)
|
mit
|
dsullivan7/scikit-learn
|
examples/linear_model/plot_lasso_and_elasticnet.py
|
249
|
1982
|
"""
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
|
bsd-3-clause
|
dkhavari/open-source-investing
|
scripts/correlation.py
|
1
|
2790
|
from tsvprocessing import tsv_to_tuple_list
from pandas import Series
import datetime as dt
import time
import sys
import csv
import re
# ------------------------------
# Function: Grouping and Averaging
# ------------------------------
def make_grouped_second_prices(equity, top_bound, bottom_bound, decrement):
# Create result vector and variables.
averaged_time_groups = []
equity_counter = 0
# Iterate through the time vector in ten-second steps.
for i in xrange(0, 2340):
# Iterate through the equity and group based on ten-second intervals.
sum_of_interval = 0.0
number_to_average = 0.0 # Currently testing with this.
while True:
# Make sure we avoid any indexing errors.
if equity_counter >= len(equity):
break
# Set up the trade and execution time for use.
trade = equity[equity_counter]
time_of_execution = trade[0]
# If the time of execution is in the proper range, use it.
if time_of_execution <= top_bound and time_of_execution > bottom_bound:
price = trade[1]
sum_of_interval += price
number_to_average += 1
equity_counter += 1
else:
break
# Get the important metric: 10-second interval average price.
        if sum_of_interval == 0.0: # In other words, nothing found in range.
average_executed_price = averaged_time_groups[i-1]
else:
average_executed_price = sum_of_interval / number_to_average
averaged_time_groups.append(average_executed_price)
# Move the loop forward.
top_bound = top_bound - decrement
bottom_bound = top_bound - decrement
return averaged_time_groups
# Access the two equity files passed in.
equity_one = str(sys.argv[1])
equity_two = str(sys.argv[2])
file_one = open(equity_one, 'r')
file_two = open(equity_two, 'r')
# Parse the TSV files into something intelligible.
tsv_one = csv.reader(file_one, delimiter='\t')
tsv_two = csv.reader(file_two, delimiter='\t')
# Use the miniature tsvprocessing module I made.
equity_one = tsv_to_tuple_list(tsv_one)
equity_two = tsv_to_tuple_list(tsv_two)
# Set up the loop for aggregating to 10-second averages.
decrement = dt.timedelta(seconds = 10)
top_bound = dt.datetime.strptime('16:00:00', '%H:%M:%S')
bottom_bound = top_bound - decrement
# Get the averages.
equity_one_ten_sec_avg = make_grouped_second_prices(equity_one, top_bound, bottom_bound, decrement)
equity_two_ten_sec_avg = make_grouped_second_prices(equity_two, top_bound, bottom_bound, decrement)
# Create the pandas series.
one = Series(equity_one_ten_sec_avg)
two = Series(equity_two_ten_sec_avg)
# Compute Covariance.
covariance = one.cov(two)
# Compute standard deviations.
one_stddev = one.std()
two_stddev = two.std()
# Correlation coefficient...
correlation_coefficient = covariance/(one_stddev*two_stddev)
print correlation_coefficient
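# Cross-check (added): pandas implements Pearson correlation directly on Series;
# the value below should match the manual covariance / std computation above.
print 'pandas corr: %f' % one.corr(two)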
|
mit
|
numenta-ci/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cairo.py
|
69
|
16706
|
"""
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
* varables lowerUpper
* functions underscore_separated
"""
from __future__ import division
import os, sys, warnings, gzip
import numpy as npy
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that pycairo is installed.")
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
from matplotlib import rcParams
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
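# (added note) cairo's FORMAT_ARGB32 stores each pixel as a native-endian 32-bit
# value, so the in-memory byte order is BGRA on little-endian machines and ARGB
# on big-endian ones; draw_image() uses BYTE_FORMAT to pick the matching conversion.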
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.dpi = dpi
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
def set_ctx_from_surface (self, surface):
self.ctx = cairo.Context (surface)
self.ctx.save() # restore, save - when call new_gc()
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha*fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
#@staticmethod
def convert_path(ctx, tpath):
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
convert_path = staticmethod(convert_path)
def draw_path(self, gc, path, transform, rgbFace=None):
if len(path.vertices) > 18980:
raise ValueError("The Cairo backend can not draw paths longer than 18980 points.")
ctx = gc.ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
tpath = transform.transform_path(path)
ctx.new_path()
self.convert_path(ctx, tpath)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha())
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# bbox - not currently used
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
im.flipud_out()
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
# function does not pass a 'gc' so use renderer.ctx
ctx = self.ctx
y = self.height - y - rows
ctx.set_source_surface (surface, x, y)
ctx.paint()
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * npy.pi / 180)
ctx.set_font_size (size)
ctx.show_text (s.encode("utf-8"))
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * npy.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
ctx.show_text(s.encode("utf-8"))
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.ctx.restore() # matches save() in set_ctx_from_surface()
self.ctx.save()
return GraphicsContextCairo (renderer=self)
def points_to_pixels(self, points):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.ctx = renderer.ctx
def set_alpha(self, alpha):
self._alpha = alpha
rgb = self._rgb
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], alpha)
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
self._cliprect = rectangle
if rectangle is None:
return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
# Alternative: just set _cliprect here and actually set cairo clip rect
# in fill_and_stroke() inside ctx.save() ... ctx.restore()
def set_clip_path(self, path):
if path is not None:
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
tpath = affine.transform_path(tpath)
RendererCairo.convert_path(ctx, tpath)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
        if dashes is None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash (
self.renderer.points_to_pixels (npy.asarray(dashes)), offset)
def set_foreground(self, fg, isRGB=None):
GraphicsContextBase.set_foreground(self, fg, isRGB)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = w
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
    if _debug: print 'backend_cairo.%s()' % _fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasCairo(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def get_default_filetype(self):
return rcParams['cairo.format']
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not cairo.HAS_PS_SURFACE:
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not cairo.HAS_PDF_SURFACE:
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not cairo.HAS_SVG_SURFACE:
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
filename = fo
if is_string_like(fo):
fo = open(fo, 'wb')
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.ctx
if orientation == 'landscape':
ctx.rotate (npy.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
|
agpl-3.0
|
huzq/scikit-learn
|
examples/linear_model/plot_sgd_penalties.py
|
23
|
1405
|
"""
==============
SGD: Penalties
==============
Contours of where the penalty is equal to 1
for the three penalties L1, L2 and elastic-net.
All of the above are supported by :class:`~sklearn.linear_model.SGDClassifier`
and :class:`~sklearn.linear_model.SGDRegressor`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
line = np.linspace(-1.5, 1.5, 1001)
xx, yy = np.meshgrid(line, line)
l2 = xx ** 2 + yy ** 2
l1 = np.abs(xx) + np.abs(yy)
rho = 0.5
elastic_net = rho * l1 + (1 - rho) * l2
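# (added note) with rho = 0.5 this traces the unit "ball" of the elastic-net
# penalty, a convex combination of the L1 and L2 penalties.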
plt.figure(figsize=(10, 10), dpi=100)
ax = plt.gca()
elastic_net_contour = plt.contour(xx, yy, elastic_net, levels=[1],
colors=elastic_net_color)
l2_contour = plt.contour(xx, yy, l2, levels=[1], colors=l2_color)
l1_contour = plt.contour(xx, yy, l1, levels=[1], colors=l1_color)
ax.set_aspect("equal")
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
plt.clabel(elastic_net_contour, inline=1, fontsize=18,
fmt={1.0: 'elastic-net'}, manual=[(-1, -1)])
plt.clabel(l2_contour, inline=1, fontsize=18,
fmt={1.0: 'L2'}, manual=[(-1, -1)])
plt.clabel(l1_contour, inline=1, fontsize=18,
fmt={1.0: 'L1'}, manual=[(-1, -1)])
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
harshaneelhg/scikit-learn
|
sklearn/kernel_ridge.py
|
155
|
6545
|
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
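        # Note (added): _solve_cholesky_kernel solves the dual system
        # (K + alpha * I) dual_coef = y via a Cholesky factorization; `copy` is set
        # for precomputed kernels so the user-supplied matrix is not modified.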
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
|
bsd-3-clause
|
joshloyal/scikit-learn
|
sklearn/utils/graph.py
|
24
|
6326
|
"""
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph : sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> list(sorted(single_source_shortest_path_length(graph, 0).items()))
[(0, 0), (1, 1), (2, 2), (3, 3)]
>>> graph = np.ones((6, 6))
>>> list(sorted(single_source_shortest_path_length(graph, 2).items()))
[(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source] # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
|
bsd-3-clause
|
pelson/numpy
|
doc/sphinxext/docscrape_sphinx.py
|
154
|
7759
|
import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
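        # NOTE (added): the early return above suppresses signature output, so the
        # if/else below is currently unreachable.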
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
|
bsd-3-clause
|
AnthonyCAS/code-for-blog
|
2008/wx_mpl_dynamic_graph.py
|
13
|
11139
|
"""
This demo demonstrates how to draw a dynamic mpl (matplotlib)
plot in a wxPython application.
It allows "live" plotting as well as manual zooming to specific
regions.
Both X and Y axes allow "auto" or "manual" settings. For Y, auto
mode sets the scaling of the graph to see all the data points.
For X, auto mode makes the graph "follow" the data. Set it X min
to manual 0 to always see the whole data from the beginning.
Note: press Enter in the 'manual' text box to make a new value
affect the plot.
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 31.07.2008
"""
import os
import pprint
import random
import sys
import wx
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import numpy as np
import pylab
class DataGen(object):
""" A silly class that generates pseudo-random data for
display in the plot.
"""
def __init__(self, init=50):
self.data = self.init = init
def next(self):
self._recalc_data()
return self.data
def _recalc_data(self):
delta = random.uniform(-0.5, 0.5)
r = random.random()
if r > 0.9:
self.data += delta * 15
elif r > 0.8:
# attraction to the initial value
delta += (0.5 if self.init > self.data else -0.5)
self.data += delta
else:
self.data += delta
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows to switch between an automatic mode and a
manual mode with an associated value.
"""
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1,
label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1,
label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(35,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
def is_auto(self):
return self.radio_auto.GetValue()
def manual_value(self):
return self.value
class GraphFrame(wx.Frame):
""" The main frame of the application
"""
title = 'Demo: dynamic matplotlib graph'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.datagen = DataGen()
self.data = [self.datagen.next()]
self.paused = False
self.create_menu()
self.create_status_bar()
self.create_main_panel()
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(100)
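        # (added) the timer fires every 100 ms, so on_redraw_timer refreshes the
        # plot roughly ten times per second.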
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.xmin_control = BoundControlBox(self.panel, -1, "X min", 0)
self.xmax_control = BoundControlBox(self.panel, -1, "X max", 50)
self.ymin_control = BoundControlBox(self.panel, -1, "Y min", 0)
self.ymax_control = BoundControlBox(self.panel, -1, "Y max", 100)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.cb_grid = wx.CheckBox(self.panel, -1,
"Show Grid",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1,
"Show X labels",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_xlab, self.cb_xlab)
self.cb_xlab.SetValue(True)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(20)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox2.AddSpacer(24)
self.hbox2.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.ymax_control, border=5, flag=wx.ALL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def init_plot(self):
self.dpi = 100
self.fig = Figure((3.0, 3.0), dpi=self.dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor('black')
self.axes.set_title('Very important random data', size=12)
pylab.setp(self.axes.get_xticklabels(), fontsize=8)
pylab.setp(self.axes.get_yticklabels(), fontsize=8)
# plot the data as a line series, and save the reference
# to the plotted line series
#
self.plot_data = self.axes.plot(
self.data,
linewidth=1,
color=(1, 1, 0),
)[0]
def draw_plot(self):
""" Redraws the plot
"""
# when xmin is on auto, it "follows" xmax to produce a
# sliding window effect. therefore, xmin is assigned after
# xmax.
#
if self.xmax_control.is_auto():
xmax = len(self.data) if len(self.data) > 50 else 50
else:
xmax = int(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = xmax - 50
else:
xmin = int(self.xmin_control.manual_value())
# for ymin and ymax, find the minimal and maximal values
# in the data set and add a mininal margin.
#
# note that it's easy to change this scheme to the
# minimal/maximal value in the current display, and not
# the whole data set.
#
if self.ymin_control.is_auto():
ymin = round(min(self.data), 0) - 1
else:
ymin = int(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
ymax = round(max(self.data), 0) + 1
else:
ymax = int(self.ymax_control.manual_value())
self.axes.set_xbound(lower=xmin, upper=xmax)
self.axes.set_ybound(lower=ymin, upper=ymax)
# anecdote: axes.grid assumes b=True if any other flag is
# given even if b is set to False.
# so just passing the flag into the first statement won't
# work.
#
if self.cb_grid.IsChecked():
self.axes.grid(True, color='gray')
else:
self.axes.grid(False)
# Using setp here is convenient, because get_xticklabels
# returns a list over which one needs to explicitly
# iterate, and setp already handles this.
#
pylab.setp(self.axes.get_xticklabels(),
visible=self.cb_xlab.IsChecked())
self.plot_data.set_xdata(np.arange(len(self.data)))
self.plot_data.set_ydata(np.array(self.data))
self.canvas.draw()
def on_pause_button(self, event):
self.paused = not self.paused
def on_update_pause_button(self, event):
label = "Resume" if self.paused else "Pause"
self.pause_button.SetLabel(label)
def on_cb_grid(self, event):
self.draw_plot()
def on_cb_xlab(self, event):
self.draw_plot()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_redraw_timer(self, event):
# if paused do not add data, but still redraw the plot
# (to respond to scale modifications, grid change, etc.)
#
if not self.paused:
self.data.append(self.datagen.next())
self.draw_plot()
def on_exit(self, event):
self.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = GraphFrame()
app.frame.Show()
app.MainLoop()
|
unlicense
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 1.2s/hmm_crossvalidation_force_motion_10_states_scaled_wrt_all_data.py
|
1
|
39780
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv3 import Fmat_original_hshv
from data_variable_hslv3 import Fmat_original_hslv
from data_variable_lshv3 import Fmat_original_lshv
from data_variable_lslv3 import Fmat_original_lslv
# Scaling function
def scaling(mat):
Fvec_a = mat[0:121,0:]
Fvec_b = mat[121:242,0:]
Fvec_c = mat[242:363,0:]
# With Scaling
max_a = np.max(abs(Fvec_a))
min_a = np.min(abs(Fvec_a))
mean_a = np.mean(Fvec_a)
std_a = np.std(Fvec_a)
#Fvec_a = (Fvec_a)/max_a
#Fvec_a = (Fvec_a-mean_a)
#Fvec_a = (Fvec_a-mean_a)/max_a
Fvec_a = (Fvec_a-mean_a)/std_a
# With Scaling
max_b = np.max(abs(Fvec_b))
min_b = np.min(abs(Fvec_b))
mean_b = np.mean(Fvec_b)
std_b = np.std(Fvec_b)
#Fvec_b = (Fvec_b)/max_b
#Fvec_b = (Fvec_b-mean_b)
#Fvec_b = (Fvec_b-mean_b)/max_b
#Fvec_b = (Fvec_b-mean_b)/std_b
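    # NOTE (added): unlike Fvec_a and Fvec_c, Fvec_b is left unscaled here; every
    # candidate normalization for it is commented out above.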
# With Scaling
max_c = np.max(abs(Fvec_c))
min_c = np.min(abs(Fvec_c))
mean_c = np.mean(Fvec_c)
std_c = np.std(Fvec_c)
#Fvec_c = (Fvec_c)/max_c
#Fvec_c = (Fvec_c-mean_c)
#Fvec_c = (Fvec_c-mean_c)/max_c
Fvec_c = (Fvec_c-mean_c)/std_c
#Fvec_c = Fvec_c*np.max((max_a,max_b))/max_c
Fvec = np.row_stack([Fvec_a,Fvec_b,Fvec_c])
n_Fvec, m_Fvec = np.shape(Fvec)
#print 'Feature_Vector_Shape:',n_Fvec, m_Fvec
return Fvec
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
index = 0
m,n = np.shape(fvec1)
#print m,n
mu_1 = np.zeros((10,1))
mu_2 = np.zeros((10,1))
cov = np.zeros((10,2,2))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
mu_1[index] = np.mean(temp_fvec1)
mu_2[index] = np.mean(temp_fvec2)
cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
if index == 0:
print 'mean = ', mu_2[index]
print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
print cov[index,:,:]
print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
print scp.std(temp_fvec2)
index = index+1
return mu_1,mu_2,cov
if __name__ == '__main__':
# Scaling wrt all data
Fmat_rf_hshv = scaling(Fmat_original_hshv[:,0:15])
Fmat_rm_hshv = Fmat_original_hshv[:,15:15]
Fmat_sf_hshv = scaling(Fmat_original_hshv[:,15:26])
Fmat_sm_hshv = scaling(Fmat_original_hshv[:,26:27])
Fmat_hshv = np.matrix(np.column_stack((Fmat_rf_hshv,Fmat_rm_hshv,Fmat_sf_hshv,Fmat_sm_hshv)))
Fmat_rf_hslv = scaling(Fmat_original_hslv[:,0:15])
Fmat_rm_hslv = scaling(Fmat_original_hslv[:,15:30])
Fmat_sf_hslv = scaling(Fmat_original_hslv[:,30:45])
Fmat_sm_hslv = scaling(Fmat_original_hslv[:,45:52])
Fmat_hslv = np.matrix(np.column_stack((Fmat_rf_hslv,Fmat_rm_hslv,Fmat_sf_hslv,Fmat_sm_hslv)))
Fmat_rf_lshv = scaling(Fmat_original_lshv[:,0:15])
Fmat_rm_lshv = scaling(Fmat_original_lshv[:,15:16])
Fmat_sf_lshv = scaling(Fmat_original_lshv[:,16:22])
Fmat_sm_lshv = scaling(Fmat_original_lshv[:,22:28])
Fmat_lshv = np.matrix(np.column_stack((Fmat_rf_lshv,Fmat_rm_lshv,Fmat_sf_lshv,Fmat_sm_lshv)))
Fmat_rf_lslv = scaling(Fmat_original_lslv[:,0:15])
Fmat_rm_lslv = scaling(Fmat_original_lslv[:,15:28])
Fmat_sf_lslv = scaling(Fmat_original_lslv[:,28:36])
Fmat_sm_lslv = scaling(Fmat_original_lslv[:,36:42])
Fmat_lslv = np.matrix(np.column_stack((Fmat_rf_lslv,Fmat_rm_lslv,Fmat_sf_lslv,Fmat_sm_lslv)))
Fmat = np.matrix(np.column_stack((Fmat_hshv,Fmat_hslv,Fmat_lshv,Fmat_lslv)))
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
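# Note (added): as written, the 2nd and 4th rows of A sum to 1.10 rather than 1.0.
# If ghmm does not renormalize internally, A can be made row-stochastic with a
# one-line normalization, e.g.:
#     A = (np.array(A) / np.array(A).sum(axis=1, keepdims=True)).tolist()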
# pi - initial probabilities per state
pi = [0.1] * 10
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_force_hshv,mu_rf_motion_hshv,cov_rf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))))
mu_rm_force_hshv,mu_rm_motion_hshv,cov_rm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))))
mu_sf_force_hshv,mu_sf_motion_hshv,cov_sf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))))
mu_sm_force_hshv,mu_sm_motion_hshv,cov_sm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = [0.0]*10
B_rm_hshv = [0.0]*10
B_sf_hshv = [0.0]*10
B_sm_hshv = [0.0]*10
for num_states in range(10):
B_rf_hshv[num_states] = [[mu_rf_force_hshv[num_states][0],mu_rf_motion_hshv[num_states][0]],[cov_rf_hshv[num_states][0][0],cov_rf_hshv[num_states][0][1],cov_rf_hshv[num_states][1][0],cov_rf_hshv[num_states][1][1]]]
B_rm_hshv[num_states] = [[mu_rm_force_hshv[num_states][0],mu_rm_motion_hshv[num_states][0]],[cov_rm_hshv[num_states][0][0],cov_rm_hshv[num_states][0][1],cov_rm_hshv[num_states][1][0],cov_rm_hshv[num_states][1][1]]]
B_sf_hshv[num_states] = [[mu_sf_force_hshv[num_states][0],mu_sf_motion_hshv[num_states][0]],[cov_sf_hshv[num_states][0][0],cov_sf_hshv[num_states][0][1],cov_sf_hshv[num_states][1][0],cov_sf_hshv[num_states][1][1]]]
B_sm_hshv[num_states] = [[mu_sm_force_hshv[num_states][0],mu_sm_motion_hshv[num_states][0]],[cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]]]
print cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]
print "----"
#print B_sm_hshv
#print mu_sm_motion_hshv
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))
total_seq_rm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))
total_seq_sf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))
total_seq_sm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))
total_seq_rf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))
total_seq_rm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))
total_seq_sf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))
total_seq_sm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))
total_seq_rf_hshv = np.zeros((242,45))
total_seq_rm_hshv = np.zeros((242,29))
total_seq_sf_hshv = np.zeros((242,29))
total_seq_sm_hshv = np.zeros((242,19))
i = 0
j = 0
while i < 242:
total_seq_rf_hshv[i] = total_seq_rf_force_hshv[j]
total_seq_rf_hshv[i+1] = total_seq_rf_motion_hshv[j]
total_seq_rm_hshv[i] = total_seq_rm_force_hshv[j]
total_seq_rm_hshv[i+1] = total_seq_rm_motion_hshv[j]
total_seq_sf_hshv[i] = total_seq_sf_force_hshv[j]
total_seq_sf_hshv[i+1] = total_seq_sf_motion_hshv[j]
total_seq_sm_hshv[i] = total_seq_sm_force_hshv[j]
total_seq_sm_hshv[i+1] = total_seq_sm_motion_hshv[j]
j=j+1
i=i+2
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
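# The loop above interleaves one force row and one motion row per trial (even rows
# force, odd rows motion), and the transpose turns each column into a single trial's
# alternating (force, motion) observation sequence -- the list-of-lists layout passed
# to ghmm.SequenceSet below.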
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = np.zeros((242,27))
total_seq_obj_force_hshv = Fmat_hshv[0:121,:]
total_seq_obj_motion_hshv = Fmat_hshv[242:363,:]
i = 0
j = 0
while i < 242:
total_seq_obj_hshv[i] = total_seq_obj_force_hshv[j]
total_seq_obj_hshv[i+1] = total_seq_obj_motion_hshv[j]
j=j+1
i=i+2
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[:,k]).T).tolist()
new_test_seq_obj_hshv = np.array(test_seq_obj_hshv)
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
#print rf_hshv.T
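# Confusion-matrix update: rows index the predicted class (RF, RM, SF, SM, i.e. which
# model's Viterbi log-likelihood won), and the column slices below follow the HSHV
# test-set column layout (0:15 RF, 15:15 RM (empty), 15:26 SF, 26:27 SM).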
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:15])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,15:26])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,26:27])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:15])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,15:26])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,26:27])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:15])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,15:26])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,26:27])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:15])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,15:26])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,26:27])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
mu_rf_force_hslv,mu_rf_motion_hslv,cov_rf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))))
mu_rm_force_hslv,mu_rm_motion_hslv,cov_rm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))))
mu_sf_force_hslv,mu_sf_motion_hslv,cov_sf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))))
mu_sm_force_hslv,mu_sm_motion_hslv,cov_sm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = [0.0]*10
B_rm_hslv = [0.0]*10
B_sf_hslv = [0.0]*10
B_sm_hslv = [0.0]*10
for num_states in range(10):
B_rf_hslv[num_states] = [[mu_rf_force_hslv[num_states][0],mu_rf_motion_hslv[num_states][0]],[cov_rf_hslv[num_states][0][0],cov_rf_hslv[num_states][0][1],cov_rf_hslv[num_states][1][0],cov_rf_hslv[num_states][1][1]]]
B_rm_hslv[num_states] = [[mu_rm_force_hslv[num_states][0],mu_rm_motion_hslv[num_states][0]],[cov_rm_hslv[num_states][0][0],cov_rm_hslv[num_states][0][1],cov_rm_hslv[num_states][1][0],cov_rm_hslv[num_states][1][1]]]
B_sf_hslv[num_states] = [[mu_sf_force_hslv[num_states][0],mu_sf_motion_hslv[num_states][0]],[cov_sf_hslv[num_states][0][0],cov_sf_hslv[num_states][0][1],cov_sf_hslv[num_states][1][0],cov_sf_hslv[num_states][1][1]]]
B_sm_hslv[num_states] = [[mu_sm_force_hslv[num_states][0],mu_sm_motion_hslv[num_states][0]],[cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]]]
print cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]
print "----"
#print B_sm_hslv
#print mu_sm_motion_hslv
# generate RF, RM, SF, SM models from parameters
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))
total_seq_rm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))
total_seq_sf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))
total_seq_sm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))
total_seq_rf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))
total_seq_rm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))
total_seq_sf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))
total_seq_sm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))
total_seq_rf_hslv = np.zeros((242,45))
total_seq_rm_hslv = np.zeros((242,14))
total_seq_sf_hslv = np.zeros((242,25))
total_seq_sm_hslv = np.zeros((242,13))
i = 0
j = 0
while i < 242:
total_seq_rf_hslv[i] = total_seq_rf_force_hslv[j]
total_seq_rf_hslv[i+1] = total_seq_rf_motion_hslv[j]
total_seq_rm_hslv[i] = total_seq_rm_force_hslv[j]
total_seq_rm_hslv[i+1] = total_seq_rm_motion_hslv[j]
total_seq_sf_hslv[i] = total_seq_sf_force_hslv[j]
total_seq_sf_hslv[i+1] = total_seq_sf_motion_hslv[j]
total_seq_sm_hslv[i] = total_seq_sm_force_hslv[j]
total_seq_sm_hslv[i+1] = total_seq_sm_motion_hslv[j]
j=j+1
i=i+2
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = np.zeros((242,52))
total_seq_obj_force_hslv = Fmat_hslv[0:121,:]
total_seq_obj_motion_hslv = Fmat_hslv[242:363,:]
i = 0
j = 0
while i < 242:
total_seq_obj_hslv[i] = total_seq_obj_force_hslv[j]
total_seq_obj_hslv[i+1] = total_seq_obj_motion_hslv[j]
j=j+1
i=i+2
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
test_seq_obj_hslv = (np.array(total_seq_obj_hslv[:,k]).T).tolist()
new_test_seq_obj_hslv = np.array(test_seq_obj_hslv)
#print new_test_seq_obj_hslv
ts_obj_hslv = new_test_seq_obj_hslv
#print np.shape(ts_obj_hslv)
final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
# Find Viterbi Path
path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
if obj_hslv == path_rf_obj_hslv[1]:
rf_hslv[0,k] = 1
elif obj_hslv == path_rm_obj_hslv[1]:
rm_hslv[0,k] = 1
elif obj_hslv == path_sf_obj_hslv[1]:
sf_hslv[0,k] = 1
else:
sm_hslv[0,k] = 1
k = k+1
#print rf_hshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:52])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:52])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:52])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:52])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
mu_rf_force_lshv,mu_rf_motion_lshv,cov_rf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lslv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lslv[242:363,0:15])))))
mu_rm_force_lshv,mu_rm_motion_lshv,cov_rm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lslv[0:121,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lslv[242:363,15:28])))))
mu_sf_force_lshv,mu_sf_motion_lshv,cov_sf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lslv[0:121,28:36])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lslv[242:363,28:36])))))
mu_sm_force_lshv,mu_sm_motion_lshv,cov_sm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lslv[0:121,36:42])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lslv[242:363,36:42])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = [0.0]*10
B_rm_lshv = [0.0]*10
B_sf_lshv = [0.0]*10
B_sm_lshv = [0.0]*10
for num_states in range(10):
B_rf_lshv[num_states] = [[mu_rf_force_lshv[num_states][0],mu_rf_motion_lshv[num_states][0]],[cov_rf_lshv[num_states][0][0],cov_rf_lshv[num_states][0][1],cov_rf_lshv[num_states][1][0],cov_rf_lshv[num_states][1][1]]]
B_rm_lshv[num_states] = [[mu_rm_force_lshv[num_states][0],mu_rm_motion_lshv[num_states][0]],[cov_rm_lshv[num_states][0][0],cov_rm_lshv[num_states][0][1],cov_rm_lshv[num_states][1][0],cov_rm_lshv[num_states][1][1]]]
B_sf_lshv[num_states] = [[mu_sf_force_lshv[num_states][0],mu_sf_motion_lshv[num_states][0]],[cov_sf_lshv[num_states][0][0],cov_sf_lshv[num_states][0][1],cov_sf_lshv[num_states][1][0],cov_sf_lshv[num_states][1][1]]]
B_sm_lshv[num_states] = [[mu_sm_force_lshv[num_states][0],mu_sm_motion_lshv[num_states][0]],[cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]]]
print cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]
print "----"
#print B_sm_lshv
#print mu_sm_motion_lshv
# generate RF, RM, SF, SM models from parameters
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lslv[0:121,0:15])))
total_seq_rm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lslv[0:121,15:28])))
total_seq_sf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lslv[0:121,28:36])))
total_seq_sm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lslv[0:121,36:42])))
total_seq_rf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lslv[242:363,0:15])))
total_seq_rm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lslv[242:363,15:28])))
total_seq_sf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lslv[242:363,28:36])))
total_seq_sm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lslv[242:363,36:42])))
total_seq_rf_lshv = np.zeros((242,45))
total_seq_rm_lshv = np.zeros((242,28))
total_seq_sf_lshv = np.zeros((242,34))
total_seq_sm_lshv = np.zeros((242,14))
i = 0
j = 0
while i < 242:
total_seq_rf_lshv[i] = total_seq_rf_force_lshv[j]
total_seq_rf_lshv[i+1] = total_seq_rf_motion_lshv[j]
total_seq_rm_lshv[i] = total_seq_rm_force_lshv[j]
total_seq_rm_lshv[i+1] = total_seq_rm_motion_lshv[j]
total_seq_sf_lshv[i] = total_seq_sf_force_lshv[j]
total_seq_sf_lshv[i+1] = total_seq_sf_motion_lshv[j]
total_seq_sm_lshv[i] = total_seq_sm_force_lshv[j]
total_seq_sm_lshv[i+1] = total_seq_sm_motion_lshv[j]
j=j+1
i=i+2
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
total_seq_obj_lshv = np.zeros((242,28))
total_seq_obj_force_lshv = Fmat_lshv[0:121,:]
total_seq_obj_motion_lshv = Fmat_lshv[242:363,:]
i = 0
j = 0
while i < 242:
total_seq_obj_lshv[i] = total_seq_obj_force_lshv[j]
total_seq_obj_lshv[i+1] = total_seq_obj_motion_lshv[j]
j=j+1
i=i+2
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
test_seq_obj_lshv = (np.array(total_seq_obj_lshv[:,k]).T).tolist()
new_test_seq_obj_lshv = np.array(test_seq_obj_lshv)
#print new_test_seq_obj_lshv
ts_obj_lshv = new_test_seq_obj_lshv
#print np.shape(ts_obj_lshv)
final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
# Find Viterbi Path
path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
if obj_lshv == path_rf_obj_lshv[1]:
rf_lshv[0,k] = 1
elif obj_lshv == path_rm_obj_lshv[1]:
rm_lshv[0,k] = 1
elif obj_lshv == path_sf_obj_lshv[1]:
sf_lshv[0,k] = 1
else:
sm_lshv[0,k] = 1
k = k+1
#print rf_lshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:22])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,22:28])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:22])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,22:28])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:22])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,22:28])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:22])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,22:28])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
mu_rf_force_lslv,mu_rf_motion_lslv,cov_rf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15])))))
mu_rm_force_lslv,mu_rm_motion_lslv,cov_rm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16])))))
mu_sf_force_lslv,mu_sf_motion_lslv,cov_sf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22])))))
mu_sm_force_lslv,mu_sm_motion_lslv,cov_sm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = [0.0]*10
B_rm_lslv = [0.0]*10
B_sf_lslv = [0.0]*10
B_sm_lslv = [0.0]*10
for num_states in range(10):
B_rf_lslv[num_states] = [[mu_rf_force_lslv[num_states][0],mu_rf_motion_lslv[num_states][0]],[cov_rf_lslv[num_states][0][0],cov_rf_lslv[num_states][0][1],cov_rf_lslv[num_states][1][0],cov_rf_lslv[num_states][1][1]]]
B_rm_lslv[num_states] = [[mu_rm_force_lslv[num_states][0],mu_rm_motion_lslv[num_states][0]],[cov_rm_lslv[num_states][0][0],cov_rm_lslv[num_states][0][1],cov_rm_lslv[num_states][1][0],cov_rm_lslv[num_states][1][1]]]
B_sf_lslv[num_states] = [[mu_sf_force_lslv[num_states][0],mu_sf_motion_lslv[num_states][0]],[cov_sf_lslv[num_states][0][0],cov_sf_lslv[num_states][0][1],cov_sf_lslv[num_states][1][0],cov_sf_lslv[num_states][1][1]]]
B_sm_lslv[num_states] = [[mu_sm_force_lslv[num_states][0],mu_sm_motion_lslv[num_states][0]],[cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]]]
print cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]
print "----"
#print B_sm_lslv
#print mu_sm_motion_lslv
# generate RF, RM, SF, SM models from parameters
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15])))
total_seq_rm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16])))
total_seq_sf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22])))
total_seq_sm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28])))
total_seq_rf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15])))
total_seq_rm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16])))
total_seq_sf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22])))
total_seq_sm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28])))
total_seq_rf_lslv = np.zeros((242,45))
total_seq_rm_lslv = np.zeros((242,16))
total_seq_sf_lslv = np.zeros((242,32))
total_seq_sm_lslv = np.zeros((242,14))
i = 0
j = 0
while i < 242:
total_seq_rf_lslv[i] = total_seq_rf_force_lslv[j]
total_seq_rf_lslv[i+1] = total_seq_rf_motion_lslv[j]
total_seq_rm_lslv[i] = total_seq_rm_force_lslv[j]
total_seq_rm_lslv[i+1] = total_seq_rm_motion_lslv[j]
total_seq_sf_lslv[i] = total_seq_sf_force_lslv[j]
total_seq_sf_lslv[i+1] = total_seq_sf_motion_lslv[j]
total_seq_sm_lslv[i] = total_seq_sm_force_lslv[j]
total_seq_sm_lslv[i+1] = total_seq_sm_motion_lslv[j]
j=j+1
i=i+2
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
total_seq_obj_lslv = np.zeros((242,42))
total_seq_obj_force_lslv = Fmat_lslv[0:121,:]
total_seq_obj_motion_lslv = Fmat_lslv[242:363,:]
i = 0
j = 0
while i < 242:
total_seq_obj_lslv[i] = total_seq_obj_force_lslv[j]
total_seq_obj_lslv[i+1] = total_seq_obj_motion_lslv[j]
j=j+1
i=i+2
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
test_seq_obj_lslv = (np.array(total_seq_obj_lslv[:,k]).T).tolist()
new_test_seq_obj_lslv = np.array(test_seq_obj_lslv)
#print new_test_seq_obj_lslv
ts_obj_lslv = new_test_seq_obj_lslv
#print np.shape(ts_obj_lslv)
# Find Viterbi Path
final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
if obj_lslv == path_rf_obj_lslv[1]:
rf_lslv[0,k] = 1
elif obj_lslv == path_rm_obj_lslv[1]:
rm_lslv[0,k] = 1
elif obj_lslv == path_sf_obj_lslv[1]:
sf_lslv[0,k] = 1
else:
sm_lslv[0,k] = 1
k = k+1
#print rf_lslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:36])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,36:42])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:36])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,36:42])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:36])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,36:42])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:36])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,36:42])
#print cmat
############################################################################################################################################
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.savefig('results_force_motion_10_states.png')
pp.show()
|
mit
|
AlexandreAbraham/brainhack2013
|
brainhack/covariance/multi_covariance.py
|
1
|
5273
|
from sklearn.base import clone, BaseEstimator
from nilearn.input_data.base_masker import filter_and_mask
from nilearn._utils.cache_mixin import cache
from nilearn.input_data import MultiNiftiMasker
from joblib import Memory, Parallel, delayed
import warnings
import nibabel
from nilearn._utils.class_inspect import get_params
def subject_covariance(
estimator, niimgs, mask_img, parameters,
confounds=None,
ref_memory_level=0,
memory=Memory(cachedir=None),
connectivity=None,
verbose=0,
copy=True):
data, affine = cache(
filter_and_mask, memory=memory, ref_memory_level=ref_memory_level,
memory_level=2,
ignore=['verbose', 'memory', 'ref_memory_level', 'copy'])(
niimgs, mask_img, parameters,
ref_memory_level=ref_memory_level,
memory=memory,
verbose=verbose,
confounds=confounds,
copy=copy)
estimator = clone(estimator)
if connectivity is not None:
estimator.fit(data, connectivity=connectivity)
else:
estimator.fit(data)
return estimator.covariance_
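# Hedged usage sketch (names are illustrative, not part of this module): any
# scikit-learn covariance estimator exposing fit(X) and covariance_ can be passed,
# e.g.
#     from sklearn.covariance import EmpiricalCovariance
#     cov = subject_covariance(EmpiricalCovariance(), 'subject01.nii.gz',
#                              mask_img, parameters, memory=Memory(cachedir=None))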
class MultiCovariance(BaseEstimator):
def __init__(self, estimator, smoothing_fwhm=None, mask=None,
detrend=None, standardize=None,
target_affine=None, target_shape=None,
low_pass=None, high_pass=None, t_r=None,
memory=Memory(cachedir=None), memory_level=0,
n_jobs=1, verbose=0):
self.estimator = estimator
self.mask = mask
self.memory = memory
self.memory_level = memory_level
self.n_jobs = n_jobs
self.verbose = verbose
self.low_pass = low_pass
self.high_pass = high_pass
self.t_r = t_r
self.smoothing_fwhm = smoothing_fwhm
self.target_affine = target_affine
self.target_shape = target_shape
self.standardize = standardize
self.detrend = detrend
def fit(self, niimgs=None, y=None, confounds=None, connectivity=None):
"""Compute the mask and the components
Parameters
----------
niimgs: list of filenames or NiImages
Data on which the covariances must be calculated. If this is a list,
the affine is considered the same for all.
"""
# Hack to support single-subject data:
if isinstance(niimgs, (basestring, nibabel.Nifti1Image)):
niimgs = [niimgs]
# This is a very incomplete hack, as it won't work right for
# single-subject list of 3D filenames
# First, learn the mask
if not isinstance(self.mask, MultiNiftiMasker):
self.masker_ = MultiNiftiMasker(mask=self.mask,
smoothing_fwhm=self.smoothing_fwhm,
target_affine=self.target_affine,
target_shape=self.target_shape,
low_pass=self.low_pass,
high_pass=self.high_pass,
t_r=self.t_r,
memory=self.memory,
memory_level=self.memory_level)
else:
try:
self.masker_ = clone(self.mask)
except TypeError as e:
# Workaround for a joblib bug: in joblib 0.6, a Memory object
# with cachedir = None cannot be cloned.
masker_memory = self.mask.memory
if masker_memory.cachedir is None:
self.mask.memory = None
self.masker_ = clone(self.mask)
self.mask.memory = masker_memory
self.masker_.memory = Memory(cachedir=None)
else:
# The error was raised for another reason
raise e
for param_name in ['target_affine', 'target_shape',
'smoothing_fwhm', 'low_pass', 'high_pass',
't_r', 'memory', 'memory_level']:
if getattr(self.masker_, param_name) is not None:
warnings.warn('Parameter %s of the masker overridden'
% param_name)
setattr(self.masker_, param_name,
getattr(self, param_name))
if self.masker_.mask is None:
self.masker_.fit(niimgs)
else:
self.masker_.fit()
self.mask_img_ = self.masker_.mask_img_
parameters = get_params(MultiNiftiMasker, self)
# Now compute the covariances
self.covariances_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(subject_covariance)(
self.estimator,
niimg,
self.masker_.mask_img_,
parameters,
memory=self.memory,
ref_memory_level=self.memory_level,
confounds=confounds,
connectivity=connectivity,
verbose=self.verbose
)
for niimg in niimgs)
return self
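# Hedged usage sketch (file names are placeholders): with a list of 4-D Nifti
# images, the per-subject covariance matrices end up in .covariances_, e.g.
#     from sklearn.covariance import LedoitWolf
#     multi_cov = MultiCovariance(LedoitWolf(), smoothing_fwhm=6, n_jobs=2)
#     multi_cov.fit(['subj01_func.nii.gz', 'subj02_func.nii.gz'])
#     print(len(multi_cov.covariances_))  # one matrix per subject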
|
bsd-3-clause
|
donK23/shed01
|
PyBeispiele/SherlockRec/model.py
|
1
|
3603
|
"""
Name: model
Purpose: Analytical model for collaborative filtering
Author: Thomas Treml ([email protected])
Date: 2015-08-31
"""
import numpy as np
import pandas as pd
from sklearn.metrics import jaccard_similarity_score
from sklearn.cross_validation import train_test_split
import cPickle
# Load pre-loaded dataframes and explore
books_df = cPickle.load(open("./data/books_df.p", "rb"))
ratings_df = cPickle.load(open("./data/ratings_df.p", "rb"))
# train-test-split
train_df, test_df = train_test_split(ratings_df, test_size=0.2)
# dummy ratings
my_rating = [-1, 1, 1, -1, 0., 1, -1, 0., 0., 0., 0., 0., 0., 0., 0.]
dummy_rating = list(np.random.randint(-1, 1, size=15))
""" Similarity Measurement """
def all_books_recommendation(user_rating=my_rating, ratings_data=ratings_df, method=jaccard_similarity_score):
"""
Recommendation engine based on collaborative filtering. Builds a recommendation array by summing the
similarity-weighted ratings for each book and dividing by the summed user similarities for that book.
:param user_rating: list of user inputs. Size 15, nan replaced with 0
:param ratings_data: dataframe with user ratings about the 15 books
:param method: similarity measurement method. Jaccard similarity (default)
:return: array of recommendation ratings for all 15 books
"""
ratings_matrix = ratings_data.ix[: , "RAT_B01":"RAT_B15"].as_matrix().astype(float)
weighted_ratings = np.array([]); user_similarities = np.array([])
for row in ratings_matrix:
similarity_coefficient = method(user_rating, row)
weighted_row = row * similarity_coefficient
row[row != 0.] = similarity_coefficient
if weighted_ratings.size == 0 and user_similarities.size == 0:
weighted_ratings = np.hstack((weighted_ratings, weighted_row))
user_similarities = np.hstack((user_similarities, row))
else:
weighted_ratings = np.vstack((weighted_ratings, weighted_row))
user_similarities = np.vstack((user_similarities, row))
total = np.sum(weighted_ratings, axis=0)
sim_sum = np.sum(user_similarities, axis=0)
return total / sim_sum
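# Worked toy example (3 books, 2 stored users; not from the dataset): with
# user_rating = [1, -1, 0] and stored ratings [1, -1, 1] and [-1, 1, -1], the
# similarities are 2/3 and 0 (for 1-D label vectors like these,
# jaccard_similarity_score reduces to the fraction of matching entries), so the
# weighted sums are [2/3, -2/3, 2/3], the per-book similarity sums are
# [2/3, 2/3, 2/3], and the returned array is [1.0, -1.0, 1.0] -- the unrated third
# book gets a strong positive score.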
def filter_recommendation(recommendation, user_input=my_rating):
"""
Filter recommendation array based on input books
:param recommendation: recommendation array of unsorted recommendation values
:param user_input: list from user input
:return: tuple ([index of recommended books],[ratings of recommended books])
"""
recommend_books = []
recommend_books_ratings = []
for position, item in enumerate(user_input):
if item == 0:
recommend_books.append(position)
for book_id in recommend_books:
recommend_books_ratings.append(recommendation[book_id])
return recommend_books, recommend_books_ratings
def sort_recommendation(recommended_idx, recommended_ratings):
"""
Sort recommendation based on rating values
:param recommended_idx: list of book indices from recommendation
:param recommended_ratings: list of ratings from recommendation
:return: list of sorted recommendations - tuple of (book_id, rating)
"""
recommended_dict = {}
for id, rating in zip(recommended_idx, recommended_ratings):
recommended_dict[id] = rating
return sorted(recommended_dict.items(), key=lambda x: x[1], reverse=True)
recommended_books, recommended_ratings = filter_recommendation(all_books_recommendation(my_rating, ratings_data=train_df))
my_recommendation = sort_recommendation(recommended_books, recommended_ratings)
print my_recommendation
|
apache-2.0
|
zak-k/cartopy
|
lib/cartopy/tests/mpl/test_axes.py
|
3
|
2980
|
# (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import unittest
from matplotlib.testing.decorators import cleanup
import matplotlib.path as mpath
import matplotlib.pyplot as plt
from nose.tools import assert_equal
import numpy as np
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import InterProjectionTransform
from .test_caching import CallCounter
class TestNoSpherical(unittest.TestCase):
def setUp(self):
self.ax = plt.axes(projection=ccrs.PlateCarree())
self.data = np.arange(12).reshape((3, 4))
def tearDown(self):
plt.clf()
plt.close()
def test_contour(self):
with self.assertRaises(ValueError):
self.ax.contour(self.data, transform=ccrs.Geodetic())
def test_contourf(self):
with self.assertRaises(ValueError):
self.ax.contourf(self.data, transform=ccrs.Geodetic())
def test_pcolor(self):
with self.assertRaises(ValueError):
self.ax.pcolor(self.data, transform=ccrs.Geodetic())
def test_pcolormesh(self):
with self.assertRaises(ValueError):
self.ax.pcolormesh(self.data, transform=ccrs.Geodetic())
def test_transform_PlateCarree_shortcut():
src = ccrs.PlateCarree(central_longitude=0)
target = ccrs.PlateCarree(central_longitude=180)
# Of the 3 paths, 2 cannot take the short-cut.
pth1 = mpath.Path([[0.5, 0], [10, 10]])
pth2 = mpath.Path([[0.5, 91], [10, 10]])
pth3 = mpath.Path([[-0.5, 0], [10, 10]])
trans = InterProjectionTransform(src, target)
counter = CallCounter(target, 'project_geometry')
with counter:
trans.transform_path(pth1)
# pth1 should allow a short-cut.
assert_equal(counter.count, 0)
with counter:
trans.transform_path(pth2)
assert_equal(counter.count, 1)
with counter:
trans.transform_path(pth3)
assert_equal(counter.count, 2)
@cleanup
def test_geoaxes_subplot():
ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())
assert_equal(str(ax.__class__),
"<class 'cartopy.mpl.geoaxes.GeoAxesSubplot'>")
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
lgpl-3.0
|
ahoyosid/scikit-learn
|
sklearn/lda.py
|
15
|
17655
|
"""
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
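# Hedged illustration (shapes only): for X of shape (n_samples, n_features),
# _cov(X) returns the (n_features, n_features) empirical estimate, _cov(X, 'auto')
# rescales a Ledoit-Wolf estimate by the per-feature std, and _cov(X, 0.3) applies
# a fixed shrinkage of 0.3 towards the scaled identity.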
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
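# The call below solves the generalized eigenproblem Sb v = lambda Sw v; the
# eigenvectors with the largest eigenvalues maximize the between-class to
# within-class scatter ratio and define the discriminant directions.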
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
|
bsd-3-clause
|
ch3ll0v3k/scikit-learn
|
examples/linear_model/plot_lasso_coordinate_descent_path.py
|
254
|
2639
|
"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
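# Quick sanity check (illustration only): each path returns the alphas in
# decreasing order and a coefficient matrix of shape (n_features, n_alphas),
# which is why the plots below transpose coefs_lasso and coefs_enet.
print("lasso path: %d alphas, coefs shape %s"
      % (len(alphas_lasso), coefs_lasso.shape))
print("enet path: %d alphas, coefs shape %s"
      % (len(alphas_enet), coefs_enet.shape))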
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
aigamedev/scikit-neuralnetwork
|
sknn/tests/test_types.py
|
3
|
6029
|
import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal, assert_true)
import os
import random
import shutil
import tempfile
import numpy
import pandas
import theano
import scipy.sparse
from sknn.mlp import MultiLayerPerceptron as MLP
from sknn.mlp import Layer as L, Convolution as C
# Sparse matrices must support indexing. Formats other than these do not work for this reason.
SPARSE_TYPES = ['csr_matrix', 'csc_matrix', 'dok_matrix', 'lil_matrix']
class TestScipySparseMatrix(unittest.TestCase):
def setUp(self):
self.nn = MLP(layers=[L("Linear", units=4)], n_iter=1)
def test_FitFloat64(self):
for t in SPARSE_TYPES:
sparse_matrix = getattr(scipy.sparse, t)
X = sparse_matrix((8, 4), dtype=numpy.float64)
y = sparse_matrix((8, 4), dtype=numpy.float64)
self.nn._fit(X, y)
def test_FitFloat32(self):
for t in SPARSE_TYPES:
sparse_matrix = getattr(scipy.sparse, t)
X = sparse_matrix((8, 4), dtype=numpy.float32)
y = sparse_matrix((8, 4), dtype=numpy.float32)
self.nn._fit(X, y)
def test_FitHybrid(self):
for t in SPARSE_TYPES:
sparse_matrix = getattr(scipy.sparse, t)
X = sparse_matrix((8, 4), dtype=numpy.float32)
y = numpy.zeros((8, 4), dtype=numpy.float32)
self.nn._fit(X, y)
def test_FitMutator(self):
def mutate(Xb, **_):
self.count += 1
Xb -= 0.5
self.nn.callback = {'on_batch_start': mutate}
for t in SPARSE_TYPES:
sparse_matrix = getattr(scipy.sparse, t)
X = sparse_matrix((8, 4), dtype=numpy.float32)
y = numpy.zeros((8, 4), dtype=numpy.float32)
self.count = 0
assert_equal(0, self.count)
self.nn._fit(X, y)
assert_equal(8, self.count)
def test_Predict64(self):
theano.config.floatX = 'float64'
for t in SPARSE_TYPES:
sparse_matrix = getattr(scipy.sparse, t)
X = sparse_matrix((8, 4), dtype=numpy.float64)
yp = self.nn._predict(X)
assert_equal(yp.dtype, numpy.float64)
def test_Predict32(self):
theano.config.floatX = 'float32'
for t in SPARSE_TYPES:
sparse_matrix = getattr(scipy.sparse, t)
X = sparse_matrix((8, 4), dtype=numpy.float32)
yp = self.nn._predict(X)
assert_equal(yp.dtype, numpy.float32)
class TestMemoryMap(unittest.TestCase):
__types__ = ['float32', 'float64']
def setUp(self):
self.nn = MLP(layers=[L("Linear", units=3)], n_iter=1)
self.directory = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.directory)
def make(self, name, shape, dtype):
filename = os.path.join(self.directory, name)
return numpy.memmap(filename, dtype=dtype, mode='w+', shape=shape)
def test_FitAllTypes(self):
for t in self.__types__:
theano.config.floatX = t
X = self.make('X', (12, 3), dtype=t)
y = self.make('y', (12, 3), dtype=t)
self.nn._fit(X, y)
def test_PredictAllTypes(self):
for t in self.__types__:
theano.config.floatX = t
X = self.make('X', (12, 3), dtype=t)
yp = self.nn._predict(X)
class TestPandasDataFrame(TestMemoryMap):
__types__ = ['float32']
def make(self, _, shape, dtype):
return pandas.DataFrame(numpy.random.uniform(-1.0, 1.0, size=shape), dtype=dtype)
class TestConvolution(unittest.TestCase):
def setUp(self):
self.nn = MLP(
layers=[
C("Rectifier", kernel_shape=(3,3), channels=4),
L("Linear")],
n_iter=1)
def test_FitError(self):
# The sparse matrices can't store anything but 2D, but convolution needs 3D or more.
for t in SPARSE_TYPES:
sparse_matrix = getattr(scipy.sparse, t)
X, y = sparse_matrix((8, 16)), sparse_matrix((8, 16))
assert_raises((TypeError, NotImplementedError), self.nn._fit, X, y)
def test_FitResizeSquare(self):
        # A flat dense input whose feature count is a perfect square (36 = 6x6) can be reshaped for the convolution.
X, y = numpy.zeros((8, 36)), numpy.zeros((8, 4))
self.nn._fit(X, y)
def test_FitResizeFails(self):
        # A flat dense input of 35 features cannot be reshaped into a square image, so fitting fails.
X, y = numpy.zeros((8, 35)), numpy.zeros((8, 4))
assert_raises(AssertionError, self.nn._fit, X, y)
class TestFormatDeterminism(unittest.TestCase):
def test_TrainRandomOneEpoch(self):
for t in ['dok_matrix', 'lil_matrix']:
sparse_matrix = getattr(scipy.sparse, t)
X_s, y_s = sparse_matrix((8, 16), dtype=numpy.float32), sparse_matrix((8, 16), dtype=numpy.float32)
for i in range(X_s.shape[0]):
X_s[i,random.randint(0, X_s.shape[1]-1)] = 1.0
y_s[i,random.randint(0, y_s.shape[1]-1)] = 1.0
X, y = X_s.toarray(), y_s.toarray()
nn1 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
nn1._fit(X, y)
nn2 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
nn2._fit(X_s, y_s)
            assert_true(numpy.all(nn1._predict(X_s) == nn2._predict(X_s)))
def test_TrainConstantOneEpoch(self):
for t in ['csr_matrix', 'csc_matrix']:
sparse_matrix = getattr(scipy.sparse, t)
X_s, y_s = sparse_matrix((8, 16), dtype=numpy.float32), sparse_matrix((8, 16), dtype=numpy.float32)
X, y = X_s.toarray(), y_s.toarray()
nn1 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
nn1._fit(X, y)
nn2 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
nn2._fit(X_s, y_s)
            assert_true(numpy.all(nn1._predict(X_s) == nn2._predict(X_s)))
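# --- Hedged sketch (illustration only, not collected by the test runner) ----
# Mirrors TestScipySparseMatrix above: fit a one-layer network on a CSR input
# with a dense target. The helper name and data values are made up here.
def _demo_sparse_fit():
    nn = MLP(layers=[L("Linear", units=4)], n_iter=1)
    X = scipy.sparse.csr_matrix(numpy.eye(8, 4, dtype=numpy.float32))
    y = numpy.zeros((8, 4), dtype=numpy.float32)
    nn._fit(X, y)
    return nn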
|
bsd-3-clause
|
dsquareindia/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
53
|
13398
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
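# --- Hedged sketch (illustration only) ---------------------------------------
# A minimal in-memory dump/load round trip using the helpers already imported
# above; the array values are arbitrary.
def _demo_svmlight_roundtrip():
    X = np.array([[0.0, 1.5, 0.0], [2.0, 0.0, 3.0]])
    y = np.array([1, 2])
    f = BytesIO()
    dump_svmlight_file(X, y, f, zero_based=True)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=True)
    return X2.toarray(), y2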
|
bsd-3-clause
|
LEX2016WoKaGru/pyClamster
|
examples/clustering/cloud_clustering_slic.py
|
1
|
4919
|
# -*- coding: utf-8 -*-
"""
Created on 13.06.16
Created for pyclamster
@author: Tobias Sebastian Finn, [email protected]
Copyright (C) {2016} {Tobias Sebastian Finn}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# System modules
import pickle
import warnings
import glob
import os
import time
# External modules
import numpy as np
import scipy.interpolate
import scipy.misc
import scipy.ndimage
from skimage.segmentation import slic
from skimage.filters import threshold_otsu
from skimage.filters import rank
from skimage.morphology import disk
from skimage.color import rgb2hsv
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
from skimage import exposure
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Internal modules
from pyclamster import Image, Labels
from pyclamster.clustering.preprocess import LCN
from pyclamster.functions import rbDetection
plt.style.use('typhon')
warnings.catch_warnings()
warnings.filterwarnings('ignore')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
image_directory = os.path.join(BASE_DIR, "examples", "images", 'wettermast')
trained_models = os.path.join(BASE_DIR, "data")
good_angle = 45
center = int(1920/2)
good_angle_dpi = int(np.round(1920 / 180 * good_angle))
denoising_ratio = 10
all_images = glob.glob(os.path.join(image_directory, "Image_*.jpg"))
predictor = pickle.load(open(os.path.join(trained_models, "kmeans.pk"), "rb"))
for image_path in all_images:
image = Image(image_path)
image.cut([center - good_angle_dpi, center-good_angle_dpi, center+good_angle_dpi, center + good_angle_dpi]).save('test.jpg')
image.data = image.data[center - good_angle_dpi:center + good_angle_dpi,
center - good_angle_dpi:center + good_angle_dpi]
#selem = np.ones((50,50))
#image.data = equal_each(image.data, selem)
#segmented_image = slic(image.data, n_segments=50, compactness=10, sigma=1)+1
segmented_image = slic(image.data, slic_zero=True)+1
rb_image = image.data[:,:,0]-image.data[:,:,2]
#selem = np.ones((250, 250))
#rb_image = exposure.adjust_sigmoid(rb_image, cutoff=0.1, gain=0.5)
#p2, p98 = np.percentile(rb_image, (2, 98))
#rb_image = exposure.rescale_intensity(rb_image, in_range=(p2, p98))
#rb_image = exposure.equalize_adapthist(rb_image, clip_limit=0.03)*255
# rb_image = rb_image-np.min(rb_image)
# rb_image = rb_image/(np.max(rb_image)-np.min(rb_image)+0.1)
# rb_image = rb_image*255
#p10, p90 = np.percentile(rb_image, (10, 90))
global_thres = threshold_otsu(
rb_image[rb_image>10])
threshold_array = np.zeros_like(segmented_image)
threshold_array[:] = global_thres
# threshold_array[:] = np.nan
# for label in np.unique(segmented_image):
# masked_rb = np.ma.masked_where(segmented_image!=label, rb_image)
# lcenter = segmented_image==label
# if (masked_rb.max()<global_thres) or (masked_rb.min()>global_thres):
# threshold_array[lcenter] = global_thres
# else:
# local_otsu = threshold_otsu(rb_image[segmented_image==label])
# threshold_array[lcenter] = 0.5*local_otsu+0.5*global_thres
# threshold_array = scipy.ndimage.filters.maximum_filter(threshold_array, footprint=np.ones((40,40)), mode='nearest')
# threshold_array = scipy.ndimage.filters.gaussian_filter(threshold_array, sigma=20, mode='nearest')
label = Labels((np.logical_or(rb_image>threshold_array, rb_image<10)).astype(int))
scipy.misc.imsave("cloud.png", label.labels)
masks = label.getMaskStore()
masks.denoise([1], 2000)
cloud_labels, _ = masks.givenLabelMask(segmented_image, [1,])
scipy.misc.imsave("labels.png", cloud_labels.labels)
gs = gridspec.GridSpec(2,3)
ax = plt.subplot(gs[0, 0])
ax.imshow(image.data)
ax = plt.subplot(gs[0, 1])
ax.imshow(rb_image)
ax = plt.subplot(gs[1, :2])
ax.hist(rb_image.reshape((-1)), bins=256)
ax.axvline(x=global_thres)
ax = plt.subplot(gs[1, 2])
ax.imshow(threshold_array)
ax = plt.subplot(gs[0, 2])
ax.imshow(cloud_labels.labels)
plt.show()
#scipy.misc.imshow(cloud_labels.labels)
cloud_store = cloud_labels.getMaskStore()
|
gpl-3.0
|
altairpearl/scikit-learn
|
sklearn/cluster/k_means_.py
|
5
|
59442
|
"""K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state: numpy.RandomState
The generator used to initialize the centers.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
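# --- Hedged sketch (illustration only, not used by the library) -------------
# The core of the k-means++ seeding above: candidate centers are drawn with
# probability proportional to the squared distance to the closest existing
# center (D^2 sampling). The tiny pure-numpy demo below mirrors the
# searchsorted-on-cumsum trick used in _k_init; names are made up here.
def _demo_d2_sampling(closest_dist_sq, random_state, n_candidates=3):
    # draw n_candidates indices with probability proportional to closest_dist_sq
    rand_vals = random_state.random_sample(n_candidates) * closest_dist_sq.sum()
    return np.searchsorted(closest_dist_sq.cumsum(), rand_vals)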
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
    # subtract the mean of X for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
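# --- Hedged usage sketch (illustration only) ---------------------------------
# Calling the functional interface above directly on random data; the variable
# names are made up for this sketch.
def _demo_k_means_usage():
    rng = np.random.RandomState(0)
    demo_X = rng.rand(100, 2)
    centers, labels, inertia = k_means(demo_X, n_clusters=3, random_state=0)
    # centers has shape (3, 2), labels has shape (100,)
    return centers, labels, inertia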
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise ValueError("algorithm='elkan' not supported for sparse input X")
X = check_array(X, order="C")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
    labels : int array, shape (n_samples,)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
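# --- Hedged sketch (illustration only) ---------------------------------------
# The E-step computed above boils down to: assign each sample to its nearest
# center and sum the corresponding squared distances. The pure-numpy version
# below shows that relation on dense data; it is not used by the library code.
def _demo_e_step(X, centers):
    # pairwise squared distances of shape (n_samples, n_clusters)
    sq_dist = ((X[:, np.newaxis, :] - centers[np.newaxis, :, :]) ** 2).sum(axis=2)
    labels = sq_dist.argmin(axis=1)
    inertia = sq_dist.min(axis=1).sum()
    return labels, inertia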
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
init: {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
        random subset of the data. This subset needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[ 1., 2.],
[ 4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it can fall into local minima. That's why
it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
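# Illustrative sketch, not called anywhere in this module: the dense branch
# above keeps every center equal to the running mean of all points ever
# assigned to it.  The helper below re-derives that update on toy numbers using
# only numpy; the name and the values are hypothetical.
def _example_running_mean_center_update():
    import numpy as np
    old_center = np.array([1.0, 1.0])
    old_count = 3
    # two new points from the current mini-batch assigned to this center
    batch_points = np.array([[2.0, 0.0], [4.0, 2.0]])
    # same algebra as the in-place update above: undo the old scaling,
    # add the new points, then divide by the updated count
    new_count = old_count + len(batch_points)
    new_center = (old_center * old_count + batch_points.sum(axis=0)) / new_count
    assert np.allclose(new_center, [1.8, 1.0])
    return new_center, new_count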
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
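# Illustrative sketch, not used by the estimator: the exponentially weighted
# average applied above can be reproduced with a few lines of plain Python.
# The helper name is hypothetical and exists only to make the smoothing
# explicit; ``alpha`` is capped at 1.0 exactly as in the convergence check.
def _example_ewa_smoothing(values, batch_size, n_samples):
    alpha = min(1.0, float(batch_size) * 2.0 / (n_samples + 1))
    ewa = None
    for value in values:
        ewa = value if ewa is None else ewa * (1 - alpha) + value * alpha
    return ewa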
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
        batches that do not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized mean of the center
        squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
only algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge to a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        closest cluster center.
See also
--------
KMeans
        The classic implementation of the clustering method based on
        Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
            # this is the first call to partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
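# Illustrative usage sketch (not part of the library module): fitting the
# estimator defined above through the public scikit-learn API, both in a single
# call to ``fit`` and incrementally via ``partial_fit``.  The data is synthetic
# and every parameter value below is an arbitrary example.
def _example_minibatch_kmeans_usage():
    import numpy as np
    from sklearn.cluster import MiniBatchKMeans
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2) + 5, rng.randn(100, 2) - 5])
    # one-shot fit: the data is chunked into mini-batches internally
    mbk = MiniBatchKMeans(n_clusters=2, batch_size=50, random_state=0)
    labels = mbk.fit(X).predict(X)
    # incremental fit: feed one mini-batch at a time
    mbk_online = MiniBatchKMeans(n_clusters=2, random_state=0)
    for batch in np.array_split(X, 4):
        mbk_online.partial_fit(batch)
    return labels, mbk_online.cluster_centers_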
|
bsd-3-clause
|
quantopian/zipline
|
zipline/__main__.py
|
1
|
11860
|
import errno
import os
import click
import logbook
import pandas as pd
from six import text_type
import zipline
from zipline.data import bundles as bundles_module
from trading_calendars import get_calendar
from zipline.utils.compat import wraps
from zipline.utils.cli import Date, Timestamp
from zipline.utils.run_algo import _run, BenchmarkSpec, load_extensions
from zipline.extensions import create_args
try:
__IPYTHON__
except NameError:
__IPYTHON__ = False
@click.group()
@click.option(
'-e',
'--extension',
multiple=True,
help='File or module path to a zipline extension to load.',
)
@click.option(
'--strict-extensions/--non-strict-extensions',
is_flag=True,
help='If --strict-extensions is passed then zipline will not '
'run if it cannot load all of the specified extensions. '
'If this is not passed or --non-strict-extensions is passed '
'then the failure will be logged but execution will continue.',
)
@click.option(
'--default-extension/--no-default-extension',
is_flag=True,
default=True,
help="Don't load the default zipline extension.py file in $ZIPLINE_HOME.",
)
@click.option(
'-x',
multiple=True,
help='Any custom command line arguments to define, in key=value form.'
)
@click.pass_context
def main(ctx, extension, strict_extensions, default_extension, x):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
)
def extract_option_object(option):
"""Convert a click.option call into a click.Option object.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
option_object : click.Option
The option object that this decorator will create.
"""
@option
def opt():
pass
return opt.__click_params__[0]
def ipython_only(option):
"""Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode.
"""
if __IPYTHON__:
return option
argname = extract_option_object(option).name
def d(f):
@wraps(f)
def _(*args, **kwargs):
kwargs[argname] = None
return f(*args, **kwargs)
return _
return d
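# Illustrative sketch (hypothetical helper, never invoked by the CLI): how
# ``extract_option_object`` recovers the click.Option behind a decorator, and
# how ``ipython_only`` falls back to injecting ``None`` outside of IPython.
def _example_option_introspection():
    dry_run_option = click.option('--dry-run', is_flag=True)
    # click normalizes '--dry-run' to the parameter name 'dry_run'
    assert extract_option_object(dry_run_option).name == 'dry_run'
    @ipython_only(dry_run_option)
    def command(dry_run=None):
        return dry_run
    # when __IPYTHON__ is False the wrapper simply passes dry_run=None through
    return command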
DEFAULT_BUNDLE = 'quantopian-quandl'
@main.command()
@click.option(
'-f',
'--algofile',
default=None,
type=click.File('r'),
help='The file that contains the algorithm to run.',
)
@click.option(
'-t',
'--algotext',
help='The algorithm script to run.',
)
@click.option(
'-D',
'--define',
multiple=True,
help="Define a name to be bound in the namespace before executing"
" the algotext. For example '-Dname=value'. The value may be any "
"python expression. These are evaluated in order so they may refer "
"to previously defined names.",
)
@click.option(
'--data-frequency',
type=click.Choice({'daily', 'minute'}),
default='daily',
show_default=True,
help='The data frequency of the simulation.',
)
@click.option(
'--capital-base',
type=float,
default=10e6,
show_default=True,
help='The starting capital for the simulation.',
)
@click.option(
'-b',
'--bundle',
default=DEFAULT_BUNDLE,
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to use for the simulation.',
)
@click.option(
'--bundle-timestamp',
type=Timestamp(),
default=pd.Timestamp.utcnow(),
show_default=False,
help='The date to lookup data on or before.\n'
'[default: <current-time>]'
)
@click.option(
'-bf',
'--benchmark-file',
default=None,
type=click.Path(exists=True, dir_okay=False, readable=True, path_type=str),
help='The csv file that contains the benchmark returns',
)
@click.option(
'--benchmark-symbol',
default=None,
type=click.STRING,
help="The symbol of the instrument to be used as a benchmark "
"(should exist in the ingested bundle)",
)
@click.option(
'--benchmark-sid',
default=None,
type=int,
help="The sid of the instrument to be used as a benchmark "
"(should exist in the ingested bundle)",
)
@click.option(
'--no-benchmark',
is_flag=True,
default=False,
help="If passed, use a benchmark of zero returns.",
)
@click.option(
'-s',
'--start',
type=Date(tz='utc', as_timestamp=True),
help='The start date of the simulation.',
)
@click.option(
'-e',
'--end',
type=Date(tz='utc', as_timestamp=True),
help='The end date of the simulation.',
)
@click.option(
'-o',
'--output',
default='-',
metavar='FILENAME',
show_default=True,
help="The location to write the perf data. If this is '-' the perf will"
" be written to stdout.",
)
@click.option(
'--trading-calendar',
metavar='TRADING-CALENDAR',
default='XNYS',
help="The calendar you want to use e.g. XLON. XNYS is the default."
)
@click.option(
'--print-algo/--no-print-algo',
is_flag=True,
default=False,
help='Print the algorithm to stdout.',
)
@click.option(
'--metrics-set',
default='default',
help='The metrics set to use. New metrics sets may be registered in your'
' extension.py.',
)
@click.option(
'--blotter',
default='default',
help="The blotter to use.",
show_default=True,
)
@ipython_only(click.option(
'--local-namespace/--no-local-namespace',
is_flag=True,
default=None,
help='Should the algorithm methods be resolved in the local namespace.'
))
@click.pass_context
def run(ctx,
algofile,
algotext,
define,
data_frequency,
capital_base,
bundle,
bundle_timestamp,
benchmark_file,
benchmark_symbol,
benchmark_sid,
no_benchmark,
start,
end,
output,
trading_calendar,
print_algo,
metrics_set,
local_namespace,
blotter):
"""Run a backtest for the given algorithm.
"""
# check that the start and end dates are passed correctly
if start is None and end is None:
# check both at the same time to avoid the case where a user
# does not pass either of these and then passes the first only
# to be told they need to pass the second argument also
ctx.fail(
"must specify dates with '-s' / '--start' and '-e' / '--end'",
)
if start is None:
ctx.fail("must specify a start date with '-s' / '--start'")
if end is None:
ctx.fail("must specify an end date with '-e' / '--end'")
if (algotext is not None) == (algofile is not None):
ctx.fail(
"must specify exactly one of '-f' / '--algofile' or"
" '-t' / '--algotext'",
)
trading_calendar = get_calendar(trading_calendar)
benchmark_spec = BenchmarkSpec.from_cli_params(
no_benchmark=no_benchmark,
benchmark_sid=benchmark_sid,
benchmark_symbol=benchmark_symbol,
benchmark_file=benchmark_file,
)
return _run(
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
algofile=algofile,
algotext=algotext,
defines=define,
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=output,
trading_calendar=trading_calendar,
print_algo=print_algo,
metrics_set=metrics_set,
local_namespace=local_namespace,
environ=os.environ,
blotter=blotter,
benchmark_spec=benchmark_spec,
)
def zipline_magic(line, cell=None):
"""The zipline IPython cell magic.
"""
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
try:
return run.main(
# put our overrides at the start of the parameter list so that
# users may pass values with higher precedence
[
'--algotext', cell,
'--output', os.devnull, # don't write the results by default
] + ([
# these options are set when running in line magic mode
# set a non None algo text to use the ipython user_ns
'--algotext', '',
'--local-namespace',
] if cell is None else []) + line.split(),
'%s%%zipline' % ((cell or '') and '%'),
            # don't use system exit and propagate errors to the caller
standalone_mode=False,
)
except SystemExit as e:
# https://github.com/mitsuhiko/click/pull/533
# even in standalone_mode=False `--help` really wants to kill us ;_;
if e.code:
raise ValueError('main returned non-zero status code: %d' % e.code)
@main.command()
@click.option(
'-b',
'--bundle',
default=DEFAULT_BUNDLE,
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to ingest.',
)
@click.option(
'--assets-version',
type=int,
multiple=True,
help='Version of the assets db to which to downgrade.',
)
@click.option(
'--show-progress/--no-show-progress',
default=True,
help='Print progress information to the terminal.'
)
def ingest(bundle, assets_version, show_progress):
"""Ingest the data for the given bundle.
"""
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
)
@main.command()
@click.option(
'-b',
'--bundle',
default=DEFAULT_BUNDLE,
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to clean.',
)
@click.option(
'-e',
'--before',
type=Timestamp(),
help='Clear all data before TIMESTAMP.'
' This may not be passed with -k / --keep-last',
)
@click.option(
'-a',
'--after',
type=Timestamp(),
    help='Clear all data after TIMESTAMP.'
' This may not be passed with -k / --keep-last',
)
@click.option(
'-k',
'--keep-last',
type=int,
metavar='N',
help='Clear all but the last N downloads.'
' This may not be passed with -e / --before or -a / --after',
)
def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
)
@main.command()
def bundles():
"""List all of the available data bundles.
"""
for bundle in sorted(bundles_module.bundles.keys()):
if bundle.startswith('.'):
# hide the test data
continue
try:
ingestions = list(
map(text_type, bundles_module.ingestions_for_bundle(bundle))
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
ingestions = []
# If we got no ingestions, either because the directory didn't exist or
# because there were no entries, print a single message indicating that
# no ingestions have yet been made.
for timestamp in ingestions or ["<no ingestions>"]:
click.echo("%s %s" % (bundle, timestamp))
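# Illustrative sketch (not part of zipline): the click group defined in this
# module can also be driven programmatically, e.g. from tests, with
# click.testing.CliRunner.  ``--help`` is used here because it prints the usage
# text and exits before any extension or bundle work happens; the helper name
# is hypothetical and nothing in this module calls it.
def _example_invoke_cli_help():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(main, ['--help'])
    assert result.exit_code == 0
    return result.output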
if __name__ == '__main__':
main()
|
apache-2.0
|
shenzebang/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
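    # (the first two positional arguments of Isomap are n_neighbors and
    # n_components, so this builds Isomap(n_neighbors=10, n_components=2))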
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
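# Illustrative sketch rather than a test (no ``test_`` prefix, so pytest will
# not collect it): the minimal Isomap workflow exercised by the tests above,
# using only public scikit-learn APIs; the sample size is arbitrary.
def _example_isomap_embedding():
    X, _ = datasets.make_s_curve(300, random_state=0)
    embedding = manifold.Isomap(n_neighbors=10, n_components=2).fit_transform(X)
    assert embedding.shape == (300, 2)
    return embedding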
|
bsd-3-clause
|
quiltdata/quilt
|
api/python/tests/integration/test_packages.py
|
1
|
85028
|
""" Integration tests for Quilt Packages. """
import io
import locale
import os
import pathlib
import shutil
import tempfile
from collections import Counter
from contextlib import redirect_stderr
from datetime import datetime
from io import BytesIO
from pathlib import Path
from unittest import mock
from unittest.mock import ANY, Mock, call, patch
import jsonlines
import pandas as pd
import pytest
import quilt3
from quilt3 import Package
from quilt3.backends.local import (
LocalPackageRegistryV1,
LocalPackageRegistryV2,
)
from quilt3.backends.s3 import S3PackageRegistryV1, S3PackageRegistryV2
from quilt3.util import (
PhysicalKey,
QuiltException,
RemovedInQuilt4Warning,
validate_package_name,
)
from ..utils import QuiltTestCase
DATA_DIR = Path(__file__).parent / 'data'
LOCAL_MANIFEST = DATA_DIR / 'local_manifest.jsonl'
REMOTE_MANIFEST = DATA_DIR / 'quilt_manifest.jsonl'
SERIALIZATION_DIR = Path('serialization_dir')
LOCAL_REGISTRY = Path('local_registry') # Set by QuiltTestCase
def _mock_copy_file_list(file_list, callback=None, message=None):
return [key for _, key, _ in file_list]
class PackageTest(QuiltTestCase):
default_registry_version = 1
S3PackageRegistryDefault = S3PackageRegistryV1
LocalPackageRegistryDefault = LocalPackageRegistryV1
default_test_top_hash = 'e99b760a05539460ac0a7349abb8f476e8c75282a38845fa828f8a5d28374303'
def setUp(self):
super().setUp()
load_config_wrapped = quilt3.util.load_config
def load_config_wrapper():
config = load_config_wrapped()
config.update(default_registry_version=self.default_registry_version)
return config
_config_patcher = patch(
'quilt3.util.load_config',
side_effect=load_config_wrapper,
)
self.addCleanup(_config_patcher.stop)
_config_patcher.start()
def _patch_registry(self, obj, *args, **kwargs):
patcher = patch.object(obj, *args, **kwargs)
self.addCleanup(patcher.stop)
return patcher.start()
def patch_local_registry(self, *args, **kwargs):
return self._patch_registry(self.LocalPackageRegistryDefault, *args, **kwargs)
def patch_s3_registry(self, *args, **kwargs):
return self._patch_registry(self.S3PackageRegistryDefault, *args, **kwargs)
def setup_s3_stubber_resolve_pointer(self, pkg_registry, pkg_name, *, pointer, top_hash):
self.s3_stubber.add_response(
method='get_object',
service_response={
'VersionId': 'v1',
'Body': BytesIO(top_hash.encode()),
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_pk(pkg_name, pointer).path,
}
)
def setup_s3_stubber_delete_pointer(self, pkg_registry, pkg_name, *, pointer):
self.s3_stubber.add_response(
method='delete_object',
service_response={},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_pk(pkg_name, pointer).path,
}
)
def setup_s3_stubber_pkg_install(self, pkg_registry, pkg_name, *, top_hash=None, manifest=None, entries=()):
top_hash = top_hash or self.default_test_top_hash
self.setup_s3_stubber_resolve_pointer(pkg_registry, pkg_name, pointer='latest', top_hash=top_hash)
if manifest:
self.s3_stubber.add_response(
method='head_object',
service_response={
'VersionId': 'v1',
'ContentLength': len(manifest),
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,
}
)
self.s3_stubber.add_response(
method='get_object',
service_response={
'VersionId': 'v1',
'Body': BytesIO(manifest),
'ContentLength': len(manifest),
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'VersionId': 'v1',
'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,
}
)
for url, content in entries:
key = PhysicalKey.from_url(url)
self.s3_stubber.add_response(
method='get_object',
service_response={
'VersionId': 'v1',
'Body': BytesIO(content),
},
expected_params={
'Bucket': key.bucket,
'Key': key.path,
}
)
def setup_s3_stubber_list_top_hash_candidates(self, pkg_registry, pkg_name, top_hashes):
self.s3_stubber.add_response(
method='list_objects_v2',
service_response={
'Contents': [
{
'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,
'Size': 64,
}
for top_hash in top_hashes
]
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Prefix': pkg_registry.manifests_package_dir(pkg_name).path,
}
)
def setup_s3_stubber_push_manifest(self, pkg_registry, pkg_name, top_hash, *, pointer_name):
self.s3_stubber.add_response(
method='put_object',
service_response={
'VersionId': 'v2'
},
expected_params={
'Body': ANY,
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,
}
)
if pkg_registry.revision_pointers:
self.s3_stubber.add_response(
method='put_object',
service_response={
'VersionId': 'v3'
},
expected_params={
'Body': top_hash.encode(),
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_pk(pkg_name, pointer_name).path,
}
)
self.s3_stubber.add_response(
method='put_object',
service_response={
'VersionId': 'v4'
},
expected_params={
'Body': top_hash.encode(),
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_latest_pk(pkg_name).path,
}
)
def setup_s3_stubber_upload_pkg_data(self, pkg_registry, pkg_name, *, lkey, data, version):
self.s3_stubber.add_response(
method='put_object',
service_response={
'VersionId': version,
},
expected_params={
'Body': ANY, # TODO: use data here.
'Bucket': pkg_registry.root.bucket,
'Key': f'{pkg_name}/{lkey}',
}
)
def setup_s3_stubber_list_pkg_pointers(self, pkg_registry, pkg_name, *, pointers):
self.s3_stubber.add_response(
method='list_objects_v2',
service_response={
'Contents': [
{
'Key': pkg_registry.pointer_pk(pkg_name, pointer).path,
'Size': 64,
}
for pointer in pointers
]
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Prefix': pkg_registry.pointers_dir(pkg_name).path,
}
)
def test_build_default_registry(self):
"""
        build() dumps the manifest to the location specified by 'default_local_registry' in the config.
"""
# Create a dummy file to add to the package.
test_file_name = 'bar'
test_file = Path(test_file_name).resolve()
test_file.write_text('test_file_content_string')
pkg_name = 'Quilt/Test'
def patch_get_from_config(registry_path):
return patch(
'quilt3.backends.get_from_config',
wraps=quilt3.util.get_from_config,
side_effect=lambda key: registry_path.as_uri() if key == 'default_local_registry' else mock.DEFAULT,
)
for suffix in ('suffix1', 'suffix2'):
local_registry_path = Path.cwd() / LOCAL_REGISTRY / suffix
with patch_get_from_config(local_registry_path) as mocked_get_from_config:
local_registry = self.LocalPackageRegistryDefault(PhysicalKey.from_path(local_registry_path))
new_pkg = Package()
# Build a new package into the local registry.
new_pkg = new_pkg.set('foo', test_file_name)
top_hash = new_pkg.build(pkg_name)
mocked_get_from_config.assert_any_call('default_local_registry')
# Verify manifest is registered by hash.
with open(local_registry.manifest_pk(pkg_name, top_hash).path) as fd:
pkg = Package.load(fd)
assert PhysicalKey.from_path(test_file) == pkg['foo'].physical_key
# Verify latest points to the new location.
assert Path(local_registry.pointer_latest_pk(pkg_name).path).read_text() == top_hash
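    def _example_build_and_browse(self):
        """Illustrative sketch, not a collected test (no ``test_`` prefix):
        the minimal build/browse round trip that the tests in this class stub
        out in more elaborate ways.  Assumes the writable scratch directory
        provided by QuiltTestCase; the file name and package name below are
        arbitrary examples."""
        Path('example.txt').write_text('example content')
        pkg = Package().set('data/example.txt', 'example.txt')
        top_hash = pkg.build('Quilt/Example')
        restored = Package.browse('Quilt/Example', top_hash=top_hash)
        assert restored['data/example.txt'].get_as_string() == 'example content'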
@patch('quilt3.Package._browse', lambda name, registry, top_hash: Package())
    def test_default_install_location(self):
        """Verify that installs to the default local install location work as expected."""
self.patch_local_registry('shorten_top_hash', return_value='7a67ff4')
with patch('quilt3.Package._build') as build_mock:
pkg_name = 'Quilt/nice-name'
Package.install(pkg_name, registry='s3://my-test-bucket')
build_mock.assert_called_once_with(
pkg_name,
registry=self.LocalPackageRegistryDefault(
PhysicalKey.from_url(quilt3.util.get_install_location())
),
message=None
)
def test_read_manifest(self):
""" Verify reading serialized manifest from disk. """
with open(LOCAL_MANIFEST) as fd:
pkg = Package.load(fd)
out_path = 'new_manifest.jsonl'
with open(out_path, 'w') as fd:
pkg.dump(fd)
        # Inspect the jsonl to verify everything is maintained, i.e.
# that load/dump results in an equivalent set.
# todo: Use load/dump once __eq__ implemented.
with open(LOCAL_MANIFEST) as fd:
original_set = list(jsonlines.Reader(fd))
with open(out_path) as fd:
written_set = list(jsonlines.Reader(fd))
assert len(original_set) == len(written_set)
if os.name != 'nt':
# TODO: LOCAL_MANIFEST contains paths like file:///foo -
# but they're not valid absolute paths on Windows. What do we do?
assert sorted(original_set, key=lambda k: k.get('logical_key', 'manifest')) \
== sorted(written_set, key=lambda k: k.get('logical_key', 'manifest'))
@pytest.mark.usefixtures('isolate_packages_cache')
def test_remote_browse(self):
""" Verify loading manifest from s3 """
registry = 's3://test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/test'
top_hash = 'abcdefgh' * 8
# Make the first request.
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, top_hash=top_hash, manifest=REMOTE_MANIFEST.read_bytes())
pkg = Package.browse('Quilt/test', registry=registry)
assert 'foo' in pkg
# Make the second request. Gets "latest" - but the rest should be cached.
self.setup_s3_stubber_pkg_install(pkg_registry, pkg_name, top_hash=top_hash)
pkg2 = Package.browse(pkg_name, registry=registry)
assert 'foo' in pkg2
# Make another request with a top hash. Everything should be cached.
pkg3 = Package.browse(pkg_name, top_hash=top_hash, registry=registry)
assert 'foo' in pkg3
# Make a request with a short hash.
self.setup_s3_stubber_list_top_hash_candidates(pkg_registry, pkg_name, (top_hash, 'a' * 64))
pkg3 = Package.browse(pkg_name, top_hash='abcdef', registry=registry)
assert 'foo' in pkg3
# Make a request with a bad short hash.
with pytest.raises(QuiltException, match='Invalid hash'):
Package.browse(pkg_name, top_hash='abcde', registry=registry)
with pytest.raises(QuiltException, match='Invalid hash'):
Package.browse(pkg_name, top_hash='a' * 65, registry=registry)
        # Make a request with a non-existent short hash.
self.setup_s3_stubber_list_top_hash_candidates(pkg_registry, pkg_name, (top_hash, 'a' * 64))
with pytest.raises(QuiltException, match='Found zero matches'):
Package.browse(pkg_name, top_hash='123456', registry=registry)
def test_install_restrictions(self):
"""Verify that install can only operate remote -> local."""
# disallow installs which send package data to a remote registry
with pytest.raises(QuiltException):
quilt3.Package.install('Quilt/nice-name', dest='s3://test-bucket')
# disallow installs which send the package manifest to a remote registry
with pytest.raises(QuiltException):
quilt3.Package.install('Quilt/nice-name', dest_registry='s3://test-bucket')
def test_package_fetch(self):
""" Package.fetch() on nested, relative keys """
package_ = Package().set_dir('/', DATA_DIR / 'nested')
out_dir = 'output'
new_package_ = package_.fetch(out_dir)
expected = {'one.txt': '1', 'two.txt': '2', 'three.txt': '3'}
file_count = 0
for dirpath, _, files in os.walk(out_dir):
for name in files:
file_count += 1
with open(os.path.join(dirpath, name)) as file_:
assert name in expected, 'unexpected file: {}'.format(name)
contents = file_.read().strip()
assert contents == expected[name], \
'unexpected contents in {}: {}'.format(name, contents)
assert file_count == len(expected), \
'fetch wrote {} files; expected: {}'.format(file_count, expected)
# test that package re-rooting works as expected
out_dir_abs_path = pathlib.Path(out_dir).resolve()
for _, entry in new_package_.walk():
# relative_to will raise an exception if the first path is not inside the second path.
pathlib.Path(entry.physical_key.path).relative_to(out_dir_abs_path)
def test_package_fetch_default_dest(self):
"""Verify fetching a package to the default local destination."""
Package().set_dir('/', DATA_DIR / 'nested').fetch()
assert pathlib.Path('one.txt').exists()
assert pathlib.Path('sub/two.txt').exists()
assert pathlib.Path('sub/three.txt').exists()
def test_fetch(self):
""" Verify fetching a package entry. """
pkg = (
Package()
.set('foo', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})
.set('bar', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})
)
pkg['foo'].meta['target'] = 'unicode'
pkg['bar'].meta['target'] = 'unicode'
with open(DATA_DIR / 'foo.txt') as fd:
assert fd.read().replace('\n', '') == '123'
        # Copy foo.txt to bar.txt
pkg['foo'].fetch('data/bar.txt')
with open('data/bar.txt') as fd:
assert fd.read().replace('\n', '') == '123'
# Raise an error if you copy to yourself.
with pytest.raises(shutil.SameFileError):
pkg.set('foo', DATA_DIR / 'foo.txt')['foo'].fetch(DATA_DIR / 'foo.txt')
# The key gets re-rooted correctly.
pkg = quilt3.Package().set('foo', DATA_DIR / 'foo.txt')
new_pkg_entry = pkg['foo'].fetch('bar.txt')
assert new_pkg_entry.physical_key == PhysicalKey.from_path('bar.txt')
def test_fetch_default_dest(tmpdir):
"""Verify fetching a package entry to a default destination."""
with patch('quilt3.packages.copy_file') as copy_mock:
(Package()
.set('foo', os.path.join(os.path.dirname(__file__), 'data', 'foo.txt'))['foo']
.fetch())
filepath = os.path.join(os.path.dirname(__file__), 'data', 'foo.txt')
copy_mock.assert_called_once_with(
PhysicalKey.from_path(filepath),
PhysicalKey.from_path('foo.txt')
)
@patch('quilt3.workflows.validate', mock.MagicMock(return_value=None))
def test_load_into_quilt(self):
""" Verify loading local manifest and data into S3. """
self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')
registry = 's3://my_test_bucket/'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/package'
def add_pkg_file(pkg, lk, filename, data, *, version):
path = Path(filename)
path.write_text(data)
pkg.set(lk, path)
self.setup_s3_stubber_upload_pkg_data(pkg_registry, pkg_name, lkey=lk, data=data, version=version)
new_pkg = Package()
# Create two dummy files to add to the package.
add_pkg_file(new_pkg, 'foo1', 'bar1', 'blah', version='v1')
add_pkg_file(new_pkg, 'foo2', 'bar2', 'omg', version='v1')
timestamp1 = 1234567890
self.setup_s3_stubber_push_manifest(
pkg_registry,
pkg_name,
'7fd8e7f49a344aadf4154a2210fe6b08297ecb23218d95027963dc0410548440',
pointer_name=str(timestamp1),
)
with patch('time.time', return_value=timestamp1), \
patch('quilt3.data_transfer.MAX_CONCURRENCY', 1):
remote_pkg = new_pkg.push(pkg_name, registry)
# Modify one file, and check that only that file gets uploaded.
add_pkg_file(remote_pkg, 'foo2', 'bar3', '!!!', version='v2')
timestamp2 = 1234567891
self.setup_s3_stubber_push_manifest(
pkg_registry,
pkg_name,
'd4efbb1734a53726d97086824d153e6cb5e9d8bc31d15ead0dbc019022cfe539',
pointer_name=str(timestamp2),
)
with patch('time.time', return_value=timestamp2), \
patch('quilt3.packages.DISABLE_TQDM', True), patch('quilt3.data_transfer.DISABLE_TQDM', True), \
patch('quilt3.data_transfer.MAX_CONCURRENCY', 1):
stderr = io.StringIO()
with redirect_stderr(stderr), patch('quilt3.packages.DISABLE_TQDM', True):
remote_pkg.push(pkg_name, registry)
assert not stderr.getvalue()
def test_package_deserialize(self):
""" Verify loading data from a local file. """
pkg = (
Package()
.set('foo', DATA_DIR / 'foo.txt', {'user_meta_foo': 'blah'})
.set('bar', DATA_DIR / 'foo.unrecognized.ext')
.set('baz', DATA_DIR / 'foo.txt')
)
pkg.build('foo/bar')
pkg['foo'].meta['target'] = 'unicode'
assert pkg['foo'].deserialize() == '123\n'
assert pkg['baz'].deserialize() == '123\n'
with pytest.raises(QuiltException):
pkg['bar'].deserialize()
def test_package_entry_physical_keys(self):
pkg = Package().set('foo', DATA_DIR / 'foo.txt')
entry = pkg['foo']
physical_key = entry.physical_key
with pytest.warns(RemovedInQuilt4Warning, match='PackageEntry.physical_keys is deprecated'):
physical_keys = entry.physical_keys
assert [physical_key] == physical_keys
def test_local_set_dir(self):
""" Verify building a package from a local directory. """
pkg = Package()
# Create some nested example files that contain their names.
foodir = pathlib.Path("foo_dir")
bazdir = pathlib.Path(foodir, "baz_dir")
bazdir.mkdir(parents=True, exist_ok=True)
with open('bar', 'w') as fd:
fd.write(fd.name)
with open('foo', 'w') as fd:
fd.write(fd.name)
with open(bazdir / 'baz', 'w') as fd:
fd.write(fd.name)
with open(foodir / 'bar', 'w') as fd:
fd.write(fd.name)
pkg = pkg.set_dir("/", ".", meta="test_meta")
assert PhysicalKey.from_path('foo') == pkg['foo'].physical_key
assert PhysicalKey.from_path('bar') == pkg['bar'].physical_key
assert PhysicalKey.from_path(bazdir / 'baz') == pkg['foo_dir/baz_dir/baz'].physical_key
assert PhysicalKey.from_path(foodir / 'bar') == pkg['foo_dir/bar'].physical_key
assert pkg.meta == "test_meta"
pkg = Package()
pkg = pkg.set_dir('/', 'foo_dir/baz_dir/')
# todo nested at set_dir site or relative to set_dir path.
assert PhysicalKey.from_path(bazdir / 'baz') == pkg['baz'].physical_key
pkg = Package()
pkg = pkg.set_dir('my_keys', 'foo_dir/baz_dir/')
# todo nested at set_dir site or relative to set_dir path.
assert PhysicalKey.from_path(bazdir / 'baz') == pkg['my_keys/baz'].physical_key
# Verify ignoring files in the presence of a dot-quiltignore
with open('.quiltignore', 'w') as fd:
fd.write('foo\n')
fd.write('bar')
pkg = Package()
pkg = pkg.set_dir("/", ".")
assert 'foo_dir' in pkg.keys()
assert 'foo' not in pkg.keys() and 'bar' not in pkg.keys()
with open('.quiltignore', 'w') as fd:
fd.write('foo_dir')
pkg = Package()
pkg = pkg.set_dir("/", ".")
assert 'foo_dir' not in pkg.keys()
with open('.quiltignore', 'w') as fd:
fd.write('foo_dir\n')
fd.write('foo_dir/baz_dir')
pkg = Package()
pkg = pkg.set_dir("/", ".")
assert 'foo_dir/baz_dir' not in pkg.keys() and 'foo_dir' not in pkg.keys()
pkg = pkg.set_dir("new_dir", ".", meta="new_test_meta")
assert PhysicalKey.from_path('foo') == pkg['new_dir/foo'].physical_key
assert PhysicalKey.from_path('bar') == pkg['new_dir/bar'].physical_key
assert pkg['new_dir'].meta == "new_test_meta"
# verify set_dir logical key shortcut
pkg = Package()
pkg.set_dir("/")
assert PhysicalKey.from_path('foo') == pkg['foo'].physical_key
assert PhysicalKey.from_path('bar') == pkg['bar'].physical_key
def test_s3_set_dir(self):
""" Verify building a package from an S3 directory. """
with patch('quilt3.packages.list_object_versions') as list_object_versions_mock:
pkg = Package()
list_object_versions_mock.return_value = ([
dict(Key='foo/a.txt', VersionId='xyz', IsLatest=True, Size=10),
dict(Key='foo/x/y.txt', VersionId='null', IsLatest=True, Size=10),
dict(Key='foo/z.txt', VersionId='123', IsLatest=False, Size=10),
], [])
pkg.set_dir('', 's3://bucket/foo/', meta='test_meta')
assert pkg['a.txt'].get() == 's3://bucket/foo/a.txt?versionId=xyz'
assert pkg['x']['y.txt'].get() == 's3://bucket/foo/x/y.txt?versionId=null'
assert pkg.meta == "test_meta"
assert pkg['x']['y.txt'].size == 10 # GH368
list_object_versions_mock.assert_called_with('bucket', 'foo/')
list_object_versions_mock.reset_mock()
pkg.set_dir('bar', 's3://bucket/foo')
assert pkg['bar']['a.txt'].get() == 's3://bucket/foo/a.txt?versionId=xyz'
assert pkg['bar']['x']['y.txt'].get() == 's3://bucket/foo/x/y.txt?versionId=null'
assert pkg['bar']['a.txt'].size == 10 # GH368
list_object_versions_mock.assert_called_with('bucket', 'foo/')
    def test_set_dir_wrong_update_policy(self):
        """Verify that a non-existing update policy raises a ValueError."""
pkg = Package()
expected_err = "Update policy should be one of"
with pytest.raises(ValueError) as e:
pkg.set_dir("nested", DATA_DIR, update_policy='invalid_policy')
assert expected_err in str(e.value)
def test_package_entry_meta(self):
pkg = (
Package()
.set('foo', DATA_DIR / 'foo.txt', {'value': 'blah'})
.set('bar', DATA_DIR / 'foo.txt', {'value': 'blah2'})
)
pkg['foo']._meta['target'] = 'unicode'
pkg['bar']._meta['target'] = 'unicode'
assert pkg['foo'].meta == {'value': 'blah'}
assert pkg['bar'].meta == {'value': 'blah2'}
assert pkg['foo']._meta == {'target': 'unicode', 'user_meta': {'value': 'blah'}}
assert pkg['bar']._meta == {'target': 'unicode', 'user_meta': {'value': 'blah2'}}
pkg['foo'].set_meta({'value': 'other value'})
assert pkg['foo'].meta == {'value': 'other value'}
assert pkg['foo']._meta == {'target': 'unicode', 'user_meta': {'value': 'other value'}}
def local_manifest_timestamp_fixer(self, timestamp):
return patch('time.time', return_value=timestamp)
def test_list_local_packages(self):
"""Verify that list returns packages in the appdirs directory."""
assert not list(quilt3.list_packages())
assert not list(quilt3.list_package_versions('test/not-exists'))
pkg_names = ('Quilt/Foo', 'Quilt/Bar', 'Quilt/Test')
# Build a new package into the local registry.
timestamp = 1234567890
with self.local_manifest_timestamp_fixer(timestamp):
for pkg_name in pkg_names:
Package().build(pkg_name)
# Verify packages are returned.
assert sorted(quilt3.list_packages()) == sorted(pkg_names)
top_hash = '2a5a67156ca9238c14d12042db51c5b52260fdd5511b61ea89b58929d6e1769b'
expected_versions = [
(str(timestamp), top_hash),
]
if self.LocalPackageRegistryDefault.revision_pointers:
expected_versions.append(('latest', top_hash))
assert sorted(quilt3.list_package_versions(pkg_names[0])) == sorted(expected_versions)
# Verify specifying a local path explicitly works as expected.
assert sorted(quilt3.list_packages()) == sorted(quilt3.list_packages(LOCAL_REGISTRY.as_posix()))
def test_set_package_entry(self):
""" Set the physical key for a PackageEntry"""
pkg = (
Package()
.set('foo', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})
.set('bar', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})
)
pkg['foo'].meta['target'] = 'unicode'
pkg['bar'].meta['target'] = 'unicode'
# Build a dummy file to add to the map.
test_file = Path('bar.txt')
test_file.write_text('test_file_content_string')
pkg['bar'].set('bar.txt')
assert PhysicalKey.from_path(test_file) == pkg['bar'].physical_key
# Test shortcut codepath
pkg = Package().set('bar.txt')
assert PhysicalKey.from_path(test_file) == pkg['bar.txt'].physical_key
@patch('quilt3.workflows.validate', mock.MagicMock(return_value=None))
def test_set_package_entry_as_object(self):
self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')
pkg = Package()
nasty_string = 'a,"\tb'
num_col = [11, 22, 33]
str_col = ['a', 'b', nasty_string]
df = pd.DataFrame({'col_num': num_col, 'col_str': str_col})
# Test with serialization_dir set
pkg.set("mydataframe1.parquet", df, meta={'user_meta': 'blah'},
serialization_location=SERIALIZATION_DIR/"df1.parquet")
pkg.set("mydataframe2.csv", df, meta={'user_meta': 'blah2'},
serialization_location=SERIALIZATION_DIR/"df2.csv")
pkg.set("mydataframe3.tsv", df, meta={'user_meta': 'blah3'},
serialization_location=SERIALIZATION_DIR/"df3.tsv")
# Test without serialization_dir set
pkg.set("mydataframe4.parquet", df, meta={'user_meta': 'blah4'})
pkg.set("mydataframe5.csv", df, meta={'user_meta': 'blah5'})
pkg.set("mydataframe6.tsv", df, meta={'user_meta': 'blah6'})
for lk, entry in pkg.walk():
file_path = entry.physical_key.path
assert pathlib.Path(file_path).exists(), "The serialization files should exist"
pkg._fix_sha256()
for lk, entry in pkg.walk():
assert df.equals(entry.deserialize()), "The deserialized PackageEntry should be equal to the object " \
"that was serialized"
# Test that push cleans up the temporary files, if and only if the serialization_location was not set
with patch('quilt3.Package._push_manifest'), \
patch('quilt3.packages.copy_file_list', _mock_copy_file_list):
pkg.push('Quilt/test_pkg_name', 's3://test-bucket')
for lk in ["mydataframe1.parquet", "mydataframe2.csv", "mydataframe3.tsv"]:
file_path = pkg[lk].physical_key.path
assert pathlib.Path(file_path).exists(), "These files should not have been deleted during push()"
for lk in ["mydataframe4.parquet", "mydataframe5.csv", "mydataframe6.tsv"]:
file_path = pkg[lk].physical_key.path
assert not pathlib.Path(file_path).exists(), "These temp files should have been deleted during push()"
def test_tophash_changes(self):
test_file = Path('test.txt')
test_file.write_text('asdf', 'utf-8')
pkg = Package()
th1 = pkg.top_hash
pkg.set('asdf', test_file)
pkg.build('foo/bar')
th2 = pkg.top_hash
assert th1 != th2
test_file.write_text('jkl', 'utf-8')
pkg.set('jkl', test_file)
pkg.build('foo/bar')
th3 = pkg.top_hash
assert th1 != th3
assert th2 != th3
pkg.delete('jkl')
th4 = pkg.top_hash
assert th2 == th4
def test_top_hash_empty_build(self):
assert Package().build('pkg/test') == '2a5a67156ca9238c14d12042db51c5b52260fdd5511b61ea89b58929d6e1769b'
@patch('quilt3.workflows.validate', Mock(return_value='workflow data'))
def test_top_hash_empty_build_workflow(self):
assert Package().build('pkg/test') == 'd181e7fd54b64f7f61a3ec33753b93c748748d36fa1e8e6189d598697648a52f'
def test_keys(self):
pkg = Package()
assert not pkg.keys()
pkg.set('asdf', LOCAL_MANIFEST)
assert set(pkg.keys()) == {'asdf'}
pkg.set('jkl;', REMOTE_MANIFEST)
assert set(pkg.keys()) == {'asdf', 'jkl;'}
pkg.delete('asdf')
assert set(pkg.keys()) == {'jkl;'}
def test_iter(self):
pkg = Package()
assert not pkg
pkg.set('asdf', LOCAL_MANIFEST)
assert list(pkg) == ['asdf']
pkg.set('jkl;', REMOTE_MANIFEST)
assert set(pkg) == {'asdf', 'jkl;'}
def test_invalid_set_key(self):
"""Verify an exception when setting a key with a path object."""
pkg = Package()
with pytest.raises(TypeError):
pkg.set('asdf/jkl', Package())
def test_brackets(self):
pkg = Package()
pkg.set('asdf/jkl', LOCAL_MANIFEST)
pkg.set('asdf/qwer', LOCAL_MANIFEST)
pkg.set('qwer/asdf', LOCAL_MANIFEST)
assert set(pkg.keys()) == {'asdf', 'qwer'}
pkg2 = pkg['asdf']
assert set(pkg2.keys()) == {'jkl', 'qwer'}
assert pkg['asdf']['qwer'].get() == LOCAL_MANIFEST.as_uri()
assert pkg['asdf']['qwer'] == pkg['asdf/qwer'] == pkg[('asdf', 'qwer')]
assert pkg[[]] == pkg
pkg = (
Package()
.set('foo', DATA_DIR / 'foo.txt', {'foo': 'blah'})
)
pkg['foo'].meta['target'] = 'unicode'
pkg.build("Quilt/Test")
assert pkg['foo'].deserialize() == '123\n'
assert pkg['foo']() == '123\n'
with pytest.raises(KeyError):
pkg['baz']
with pytest.raises(TypeError):
pkg[b'asdf']
with pytest.raises(TypeError):
pkg[0]
def _test_list_remote_packages_setup_stubber(self, pkg_registry, *, pkg_names):
pkg_name1, pkg_name2, pkg_name3 = pkg_names
pointers = (
(pkg_name1, '1549931300'),
(pkg_name1, '1549931634'),
(pkg_name1, 'latest'),
(pkg_name2, '1549931301'),
(pkg_name2, '1549931634'),
(pkg_name2, 'latest'),
(pkg_name3, '1549931300'),
(pkg_name3, '1549931635'),
(pkg_name3, 'latest'),
)
self.s3_stubber.add_response(
method='list_objects_v2',
service_response={
'Contents': [
{
'Key': pkg_registry.pointer_pk(pkg, pointer).path,
'Size': 64,
}
for pkg, pointer in pointers
]
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Prefix': pkg_registry.pointers_global_dir.path,
}
)
def test_list_remote_packages(self):
"""Verify that listing remote packages works as expected."""
registry = 's3://my_test_bucket/'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_names = ('foo/bar', 'foo/bar1', 'foo1/bar')
self._test_list_remote_packages_setup_stubber(pkg_registry, pkg_names=pkg_names)
assert Counter(quilt3.list_packages(registry)) == Counter(pkg_names)
def test_validate_package_name(self):
validate_package_name("a/b")
validate_package_name("21312/bes")
with pytest.raises(QuiltException):
validate_package_name("b")
with pytest.raises(QuiltException):
validate_package_name("a/b/")
with pytest.raises(QuiltException):
validate_package_name("a\\/b")
with pytest.raises(QuiltException):
validate_package_name("a/b/c")
with pytest.raises(QuiltException):
validate_package_name("a/")
with pytest.raises(QuiltException):
validate_package_name("/b")
with pytest.raises(QuiltException):
validate_package_name("b")
def test_diff(self):
new_pkg = Package()
# Create a dummy file to add to the package.
test_file_name = 'bar'
with open(test_file_name, "w") as fd:
fd.write('test_file_content_string')
test_file = Path(fd.name)
# Build a new package into the local registry.
new_pkg = new_pkg.set('foo', test_file_name)
top_hash = new_pkg.build("Quilt/Test")
p1 = Package.browse('Quilt/Test')
p2 = Package.browse('Quilt/Test')
assert p1.diff(p2) == ([], [], [])
def test_dir_meta(self):
test_meta = {'test': 'meta'}
pkg = Package()
pkg.set('asdf/jkl', LOCAL_MANIFEST)
pkg.set('asdf/qwer', LOCAL_MANIFEST)
pkg.set('qwer/asdf', LOCAL_MANIFEST)
pkg.set('qwer/as/df', LOCAL_MANIFEST)
pkg.build('Quilt/Test')
assert pkg['asdf'].meta == {}
assert pkg.meta == {}
assert pkg['qwer']['as'].meta == {}
pkg['asdf'].set_meta(test_meta)
assert pkg['asdf'].meta == test_meta
pkg['qwer']['as'].set_meta(test_meta)
assert pkg['qwer']['as'].meta == test_meta
pkg.set_meta(test_meta)
assert pkg.meta == test_meta
dump_path = 'test_meta'
with open(dump_path, 'w') as f:
pkg.dump(f)
with open(dump_path) as f:
pkg2 = Package.load(f)
assert pkg2['asdf'].meta == test_meta
assert pkg2['qwer']['as'].meta == test_meta
assert pkg2.meta == test_meta
def test_top_hash_stable(self):
"""Ensure that top_hash() never changes for a given manifest"""
top_hash = '3426a3f721e41a1d83174c691432a39ff13720426267fc799dccf3583153e850'
manifest_path = DATA_DIR / 'top_hash_test_manifest.jsonl'
pkg = Package._from_path(manifest_path)
assert pkg.top_hash == top_hash, f'Unexpected top_hash for {manifest_path}'
pkg['b'].set_meta({'key': 'value'})
# Currently dir-level metadata doesn't affect top hash, though it should.
assert pkg.top_hash == top_hash
def test_local_package_delete(self):
"""Verify local package delete works."""
top_hash = Package().build("Quilt/Test")
assert 'Quilt/Test' in quilt3.list_packages()
quilt3.delete_package('Quilt/Test')
assert 'Quilt/Test' not in quilt3.list_packages()
def test_local_delete_package_revision(self):
pkg_name = 'Quilt/Test'
top_hash1 = 'top_hash1'
top_hash2 = 'top_hash2'
top_hash3 = 'top_hash3'
top_hashes = (top_hash1, top_hash2, top_hash3)
for i, top_hash in enumerate(top_hashes):
with patch('quilt3.Package.top_hash', top_hash), \
patch('time.time', return_value=i):
Path(top_hash).write_text(top_hash)
Package().set(top_hash, top_hash).build(pkg_name)
# All is set up correctly.
assert pkg_name in quilt3.list_packages()
assert {top_hash for _, top_hash in quilt3.list_package_versions(pkg_name)} == set(top_hashes)
assert Package.browse(pkg_name)[top_hash3].get_as_string() == top_hash3
# Remove latest revision, latest now points to the previous one.
quilt3.delete_package(pkg_name, top_hash=top_hash3)
assert pkg_name in quilt3.list_packages()
assert {top_hash for _, top_hash in quilt3.list_package_versions(pkg_name)} == {top_hash1, top_hash2}
assert Package.browse(pkg_name)[top_hash2].get_as_string() == top_hash2
# Remove non-latest revision, latest stays the same.
quilt3.delete_package(pkg_name, top_hash=top_hash1)
assert pkg_name in quilt3.list_packages()
assert {top_hash for _, top_hash in quilt3.list_package_versions(pkg_name)} == {top_hash2}
assert Package.browse(pkg_name)[top_hash2].get_as_string() == top_hash2
# Remove the last revision, package is not listed anymore.
quilt3.delete_package(pkg_name, top_hash=top_hash2)
assert pkg_name not in quilt3.list_packages()
assert not list(quilt3.list_package_versions(pkg_name))
def _test_remote_package_delete_setup_stubber(self, pkg_registry, pkg_name, *, pointers):
self.setup_s3_stubber_list_pkg_pointers(pkg_registry, pkg_name, pointers=pointers)
for pointer in pointers:
self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer=pointer)
def test_remote_package_delete(self):
"""Verify remote package delete works."""
registry = 's3://test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Test'
self._test_remote_package_delete_setup_stubber(pkg_registry, pkg_name, pointers=('0', 'latest'))
quilt3.delete_package(pkg_name, registry=registry)
def _test_remote_revision_delete_setup_stubber(self, pkg_registry, pkg_name, *, top_hashes, latest, remove,
new_latest):
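        # Stub the S3 calls expected when deleting a revision: list the package's
        # pointers, resolve each of them, delete the pointer of the removed
        # revision and, if it was 'latest', delete 'latest' and repoint it at
        # new_latest (head_object followed by copy_object).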
pointers = {str(i): top_hash for top_hash, i in top_hashes.items()}
pointers['latest'] = latest
self.setup_s3_stubber_list_pkg_pointers(pkg_registry, pkg_name, pointers=pointers)
for pointer, top_hash in pointers.items():
self.setup_s3_stubber_resolve_pointer(pkg_registry, pkg_name, pointer=pointer, top_hash=top_hash)
self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer=str(top_hashes[remove]))
if latest == remove:
self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer='latest')
if new_latest:
self.s3_stubber.add_response(
method='head_object',
service_response={
'ContentLength': len(new_latest),
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_pk(pkg_name, str(top_hashes[new_latest])).path,
}
)
self.s3_stubber.add_response(
method='copy_object',
service_response={},
expected_params={
'CopySource': {
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_pk(pkg_name, str(top_hashes[new_latest])).path,
},
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_latest_pk(pkg_name).path,
}
)
def test_remote_delete_package_revision(self):
self.patch_s3_registry('resolve_top_hash', lambda self, pkg_name, top_hash: top_hash)
registry = 's3://test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Test'
top_hash1 = 'top_hash1'
top_hash2 = 'top_hash2'
top_hash3 = 'top_hash3'
top_hashes = {
top_hash1: 1,
top_hash2: 2,
top_hash3: 3,
}
self._test_remote_revision_delete_setup_stubber(
pkg_registry, pkg_name, top_hashes=top_hashes, latest=top_hash3, new_latest=top_hash2, remove=top_hash3)
quilt3.delete_package(pkg_name, top_hash=top_hash3, registry=registry)
top_hashes.pop(top_hash3)
self._test_remote_revision_delete_setup_stubber(
pkg_registry, pkg_name, top_hashes=top_hashes, latest=top_hash2, new_latest=None, remove=top_hash1)
quilt3.delete_package(pkg_name, top_hash=top_hash1, registry=registry)
top_hashes.pop(top_hash1)
self._test_remote_revision_delete_setup_stubber(
pkg_registry, pkg_name, top_hashes=top_hashes, latest=top_hash2, new_latest=None, remove=top_hash2)
quilt3.delete_package(pkg_name, top_hash=top_hash2, registry=registry)
def test_push_restrictions(self):
p = Package()
# disallow pushing not to the top level of a remote S3 registry
with pytest.raises(QuiltException):
p.push('Quilt/Test', 's3://test-bucket/foo/bar')
# disallow pushing to the local filesystem (use install instead)
with pytest.raises(QuiltException):
p.push('Quilt/Test', './')
# disallow pushing the package manifest to remote but package data to local
with pytest.raises(QuiltException):
p.push('Quilt/Test', 's3://test-bucket', dest='./')
        # disallow pushing the package manifest to remote but package data to a different remote
with pytest.raises(QuiltException):
p.push('Quilt/Test', 's3://test-bucket', dest='s3://other-test-bucket')
@patch('quilt3.workflows.validate', return_value=None)
def test_commit_message_on_push(self, mocked_workflow_validate):
""" Verify commit messages populate correctly on push."""
self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')
with patch('quilt3.packages.copy_file_list', _mock_copy_file_list), \
patch('quilt3.Package._push_manifest') as push_manifest_mock, \
patch('quilt3.Package._calculate_top_hash', return_value=mock.sentinel.top_hash):
with open(REMOTE_MANIFEST) as fd:
pkg = Package.load(fd)
pkg.push('Quilt/test_pkg_name', 's3://test-bucket', message='test_message')
registry = self.S3PackageRegistryDefault(PhysicalKey.from_url('s3://test-bucket'))
message = 'test_message'
push_manifest_mock.assert_called_once_with(
'Quilt/test_pkg_name',
registry,
mock.sentinel.top_hash,
)
mocked_workflow_validate.assert_called_once_with(
registry=registry,
workflow=...,
meta={},
message=message,
)
def test_overwrite_dir_fails(self):
with pytest.raises(QuiltException):
pkg = Package()
pkg.set('asdf/jkl', LOCAL_MANIFEST)
pkg.set('asdf', LOCAL_MANIFEST)
def test_overwrite_entry_fails(self):
with pytest.raises(QuiltException):
pkg = Package()
pkg.set('asdf', LOCAL_MANIFEST)
pkg.set('asdf/jkl', LOCAL_MANIFEST)
def test_siblings_succeed(self):
pkg = Package()
pkg.set('as/df', LOCAL_MANIFEST)
pkg.set('as/qw', LOCAL_MANIFEST)
def test_local_repr(self):
TEST_REPR = (
"(local Package)\n"
" └─asdf\n"
" └─path1/\n"
" └─asdf\n"
" └─qwer\n"
" └─path2/\n"
" └─first/\n"
" └─asdf\n"
" └─second/\n"
" └─asdf\n"
" └─qwer\n"
)
pkg = Package()
pkg.set('asdf', LOCAL_MANIFEST)
pkg.set('qwer', LOCAL_MANIFEST)
pkg.set('path1/asdf', LOCAL_MANIFEST)
pkg.set('path1/qwer', LOCAL_MANIFEST)
pkg.set('path2/first/asdf', LOCAL_MANIFEST)
pkg.set('path2/second/asdf', LOCAL_MANIFEST)
assert repr(pkg) == TEST_REPR
def test_remote_repr(self):
with patch('quilt3.packages.get_size_and_version', return_value=(0, '0')):
TEST_REPR = (
"(remote Package)\n"
" └─asdf\n"
)
pkg = Package()
pkg.set('asdf', 's3://my-bucket/asdf')
assert repr(pkg) == TEST_REPR
TEST_REPR = (
"(remote Package)\n"
" └─asdf\n"
" └─qwer\n"
)
pkg = Package()
pkg.set('asdf', 's3://my-bucket/asdf')
pkg.set('qwer', LOCAL_MANIFEST)
assert repr(pkg) == TEST_REPR
def test_repr_empty_package(self):
pkg = Package()
r = repr(pkg)
assert r == "(empty Package)"
def test_manifest(self):
pkg = Package().set_meta({'metadata': '💩'})
pkg.set('as/df', LOCAL_MANIFEST)
pkg.set('as/qw', LOCAL_MANIFEST)
top_hash = pkg.build('foo/bar')
manifest = list(pkg.manifest)
current_locale = locale.setlocale(locale.LC_ALL)
try:
for locale_name in ('C', ''):
with self.subTest(locale_name=locale_name):
locale.setlocale(locale.LC_ALL, locale_name)
pkg2 = Package.browse('foo/bar', top_hash=top_hash)
assert list(pkg2.manifest) == manifest
finally:
locale.setlocale(locale.LC_ALL, current_locale)
@patch('quilt3.Package._push_manifest', mock.MagicMock())
@patch('quilt3.packages.copy_file_list', mock.MagicMock())
@patch('quilt3.workflows.validate', mock.MagicMock(return_value='workflow data'))
def test_manifest_workflow(self):
self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')
for method in (Package.build, Package.push):
with self.subTest(method=method):
pkg = Package()
method(pkg, 'foo/bar', registry='s3://test-bucket')
data, = pkg.manifest
assert 'workflow' in data
assert data['workflow'] == "workflow data"
def test_map(self):
pkg = Package()
pkg.set('as/df', LOCAL_MANIFEST)
pkg.set('as/qw', LOCAL_MANIFEST)
assert set(pkg.map(lambda lk, entry: lk)) == {'as/df', 'as/qw'}
pkg['as'].set_meta({'foo': 'bar'})
assert set(pkg.map(lambda lk, entry: lk, include_directories=True)) ==\
{'as/df', 'as/qw', 'as/'}
def test_filter(self):
pkg = Package()
pkg.set('a/df', LOCAL_MANIFEST)
pkg.set('a/qw', LOCAL_MANIFEST)
p_copy = pkg.filter(lambda lk, entry: lk == 'a/df')
assert list(p_copy) == ['a'] and list(p_copy['a']) == ['df']
pkg = Package()
pkg.set('a/df', LOCAL_MANIFEST)
pkg.set('a/qw', LOCAL_MANIFEST)
pkg.set('b/df', LOCAL_MANIFEST)
pkg['a'].set_meta({'foo': 'bar'})
pkg['b'].set_meta({'foo': 'bar'})
p_copy = pkg.filter(lambda lk, entry: lk == 'a/', include_directories=True)
assert list(p_copy) == []
p_copy = pkg.filter(lambda lk, entry: lk == 'a/' or lk == 'a/df',
include_directories=True)
assert list(p_copy) == ['a'] and list(p_copy['a']) == ['df']
@pytest.mark.usefixtures('clear_data_modules_cache')
def test_import(self):
with patch('quilt3.Package._browse') as browse_mock, \
patch.object(self.LocalPackageRegistryDefault, 'list_packages') as list_packages_mock:
browse_mock.return_value = quilt3.Package()
list_packages_mock.return_value = ['foo/bar', 'foo/baz']
from quilt3.data.foo import bar
assert isinstance(bar, Package)
browse_mock.assert_has_calls(
[call('foo/baz', registry=ANY), call('foo/bar', registry=ANY)], any_order=True
)
from quilt3.data import foo
assert hasattr(foo, 'bar') and hasattr(foo, 'baz')
def test_invalid_key(self):
pkg = Package()
with pytest.raises(QuiltException):
pkg.set('', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('foo/', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('foo', './')
with pytest.raises(QuiltException):
pkg.set('foo', os.path.dirname(__file__))
# we do not allow '.' or '..' files or filename separators
with pytest.raises(QuiltException):
pkg.set('.', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('..', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('./foo', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('../foo', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('foo/.', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('foo/..', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('foo/./bar', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('foo/../bar', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('s3://foo/.', LOCAL_MANIFEST)
with pytest.raises(QuiltException):
pkg.set('s3://foo/..', LOCAL_MANIFEST)
@pytest.mark.usefixtures('clear_data_modules_cache')
@pytest.mark.usefixtures('isolate_packages_cache')
def test_install(self):
self.patch_local_registry('shorten_top_hash', return_value='7a67ff4')
registry = 's3://my-test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Foo'
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(),
entries=(
('s3://my_bucket/my_data_pkg/bar.csv', b'a,b,c'),
('s3://my_bucket/my_data_pkg/baz/bat', b'Hello World!'),
('s3://my_bucket/my_data_pkg/foo', '💩'.encode()),
),
)
with patch('quilt3.data_transfer.MAX_CONCURRENCY', 1):
Package.install(pkg_name, registry=registry, dest='package')
p = Package.browse(pkg_name)
assert p['foo'].get() == 's3://my_bucket/my_data_pkg/foo'
# Check that the cache works.
local_path = pathlib.Path(p['foo'].get_cached_path())
assert local_path == pathlib.Path.cwd() / 'package/foo'
assert local_path.read_text('utf8') == '💩'
# Test that get_bytes and get_as_text works
assert p['foo'].get_bytes().decode("utf-8") == '💩'
assert p['foo'].get_as_string() == '💩'
# Check that moving the file invalidates the cache...
local_path.rename('foo2')
assert p['foo'].get_cached_path() is None
# ...but moving it back fixes it.
pathlib.Path('foo2').rename(local_path)
assert pathlib.Path(p['foo'].get_cached_path()) == local_path
# Check that changing the contents invalidates the cache.
local_path.write_text('omg')
assert p['foo'].get_cached_path() is None
# Check that installing the package again reuses the cached manifest and two objects - but not "foo".
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name,
entries=(
('s3://my_bucket/my_data_pkg/foo', '💩'.encode()),
),
)
with patch('quilt3.data_transfer.MAX_CONCURRENCY', 1):
Package.install(pkg_name, registry=registry, dest='package/')
# import works for installation outside named package directory
with patch('quilt3.Package._browse') as browse_mock:
browse_mock.return_value = quilt3.Package()
from quilt3.data.Quilt import Foo
assert isinstance(Foo, Package)
browse_mock.assert_called_once()
# make sure import works for an installed named package
pkg_name2 = 'test/foo'
same_manifest_path = (
pkg_registry.manifest_pk(pkg_name2, self.default_test_top_hash) ==
pkg_registry.manifest_pk(pkg_name, self.default_test_top_hash)
)
self.setup_s3_stubber_pkg_install(
pkg_registry,
pkg_name2,
# Manifest is cached on PackageRegistryV1, since it's on the same path.
manifest=None if same_manifest_path else REMOTE_MANIFEST.read_bytes(),
)
with patch('quilt3.data_transfer.MAX_CONCURRENCY', 1), \
tempfile.TemporaryDirectory() as tmp_dir, \
patch(
'quilt3.packages.get_install_location',
return_value=str(PhysicalKey.from_path(tmp_dir))
) as mocked_get_install_location:
Package.install(pkg_name2, registry=registry)
mocked_get_install_location.assert_called_once_with()
items = []
for dirpath, dirnames, filenames in os.walk(tmp_dir):
dirpath = pathlib.Path(dirpath)
for dirname in dirnames:
items.append((dirpath / dirname).relative_to(tmp_dir))
for filename in filenames:
items.append((dirpath / filename).relative_to(tmp_dir))
items.sort()
assert items == list(map(pathlib.Path, (
'test',
'test/foo',
'test/foo/bar.csv',
'test/foo/baz',
'test/foo/baz/bat',
'test/foo/foo',
)))
@pytest.mark.usefixtures('isolate_packages_cache')
@patch('quilt3.util.IS_CACHE_ENABLED', False)
@patch('quilt3.packages.ObjectPathCache')
def test_install_disabled_cache(self, object_path_cache_mock):
registry = 's3://my-test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Foo'
# Install a package twice and make sure cache functions weren't called.
for x in range(2):
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(),
entries=(
('s3://my_bucket/my_data_pkg/bar.csv', b'a,b,c'),
('s3://my_bucket/my_data_pkg/baz/bat', b'Hello World!'),
('s3://my_bucket/my_data_pkg/foo', '💩'.encode()),
),
)
with patch('quilt3.data_transfer.MAX_CONCURRENCY', 1):
Package.install(pkg_name, registry=registry, dest='package')
object_path_cache_mock.get.assert_not_called()
object_path_cache_mock.set.assert_not_called()
@pytest.mark.usefixtures('isolate_packages_cache')
@patch('quilt3.util.IS_CACHE_ENABLED', False)
@patch('quilt3.packages.ObjectPathCache')
def test_package_entry_disabled_cache(self, object_path_cache_mock):
registry = 's3://my-test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Foo'
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(),
)
pkg = Package.browse(pkg_name, registry=registry)
for lk, entry in pkg.walk():
assert entry.get_cached_path() is None
object_path_cache_mock.get.assert_not_called()
def test_install_subpackage_deprecated_and_new(self):
pkg_name = 'Quilt/Foo'
bucket = 'my-test-bucket'
path = 'baz'
dest = 'package'
with pytest.warns(RemovedInQuilt4Warning):
with pytest.raises(ValueError):
Package.install(f'{pkg_name}/{path}', registry=f's3://{bucket}', dest=dest, path=path)
@pytest.mark.usefixtures('isolate_packages_cache')
@patch('quilt3.data_transfer.MAX_CONCURRENCY', 1)
@patch('quilt3.packages.ObjectPathCache.set')
def test_install_subpackage_deprecated(self, mocked_cache_set):
registry = 's3://my-test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Foo'
subpackage_path = 'baz'
entry_url = 's3://my_bucket/my_data_pkg/baz/bat'
entry_content = b'42'
entries = (
(entry_url, entry_content),
)
dest = 'package'
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)
with pytest.warns(RemovedInQuilt4Warning):
Package.install(f'{pkg_name}/{subpackage_path}', registry=registry, dest=dest)
path = pathlib.Path.cwd() / dest / 'bat'
mocked_cache_set.assert_called_once_with(
entry_url,
PhysicalKey.from_path(path).path,
)
assert path.read_bytes() == entry_content
@pytest.mark.usefixtures('isolate_packages_cache')
@patch('quilt3.data_transfer.MAX_CONCURRENCY', 1)
@patch('quilt3.packages.ObjectPathCache.set')
def test_install_entry_deprecated(self, mocked_cache_set):
registry = 's3://my-test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Foo'
subpackage_path = 'baz/bat'
entry_url = 's3://my_bucket/my_data_pkg/baz/bat'
entry_content = b'42'
entries = (
(entry_url, entry_content),
)
dest = 'package'
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)
with pytest.warns(RemovedInQuilt4Warning):
Package.install(f'{pkg_name}/{subpackage_path}', registry=registry, dest=dest)
path = pathlib.Path.cwd() / dest / 'bat'
mocked_cache_set.assert_called_once_with(
entry_url,
PhysicalKey.from_path(path).path,
)
assert path.read_bytes() == entry_content
@pytest.mark.usefixtures('isolate_packages_cache')
@patch('quilt3.data_transfer.MAX_CONCURRENCY', 1)
@patch('quilt3.packages.ObjectPathCache.set')
def test_install_subpackage(self, mocked_cache_set):
registry = 's3://my-test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Foo'
path = 'baz'
entry_url = 's3://my_bucket/my_data_pkg/baz/bat'
entry_content = b'42'
entries = (
(entry_url, entry_content),
)
dest = 'package'
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)
Package.install(pkg_name, registry=registry, dest=dest, path=path)
path = pathlib.Path.cwd() / dest / 'bat'
mocked_cache_set.assert_called_once_with(
entry_url,
PhysicalKey.from_path(path).path,
)
assert path.read_bytes() == entry_content
@pytest.mark.usefixtures('isolate_packages_cache')
@patch('quilt3.data_transfer.MAX_CONCURRENCY', 1)
@patch('quilt3.packages.ObjectPathCache.set')
def test_install_entry(self, mocked_cache_set):
registry = 's3://my-test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))
pkg_name = 'Quilt/Foo'
path = 'baz/bat'
entry_url = 's3://my_bucket/my_data_pkg/baz/bat'
entry_content = b'42'
entries = (
(entry_url, entry_content),
)
dest = 'package'
self.setup_s3_stubber_pkg_install(
pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)
Package.install(pkg_name, registry=registry, dest=dest, path=path)
path = pathlib.Path.cwd() / dest / 'bat'
mocked_cache_set.assert_called_once_with(
entry_url,
PhysicalKey.from_path(path).path,
)
assert path.read_bytes() == entry_content
def test_install_bad_name(self):
with self.assertRaisesRegex(QuiltException, 'Invalid package name'):
Package().install('?')
def test_rollback(self):
p = Package()
p.set('foo', DATA_DIR / 'foo.txt')
p.build('quilt/tmp')
good_hash = p.top_hash
assert 'foo' in Package.browse('quilt/tmp')
p.delete('foo')
p.build('quilt/tmp')
assert 'foo' not in Package.browse('quilt/tmp')
Package.rollback('quilt/tmp', LOCAL_REGISTRY, good_hash)
assert 'foo' in Package.browse('quilt/tmp')
with self.assertRaises(QuiltException):
Package.rollback('quilt/tmp', LOCAL_REGISTRY, '12345678' * 8)
with self.assertRaises(QuiltException):
Package.rollback('quilt/blah', LOCAL_REGISTRY, good_hash)
def test_rollback_none_registry(self):
with pytest.raises(ValueError):
Package.rollback('quilt/tmp', None, '12345678' * 8)
def test_verify(self):
self.patch_local_registry('shorten_top_hash', return_value='7a67ff4')
pkg = Package()
pkg.set('foo', b'Hello, World!')
pkg.build('quilt/test')
Package.install('quilt/test', LOCAL_REGISTRY, dest='test')
assert pkg.verify('test')
Path('test/blah').write_text('123')
assert not pkg.verify('test')
assert pkg.verify('test', extra_files_ok=True)
Path('test/foo').write_text('123')
assert not pkg.verify('test')
assert not pkg.verify('test', extra_files_ok=True)
Path('test/foo').write_text('Hello, World!')
Path('test/blah').unlink()
assert pkg.verify('test')
@patch('quilt3.packages.calculate_sha256')
def test_fix_sha256_fail(self, mocked_calculate_sha256):
data = b'Hello, World!'
pkg = Package()
pkg.set('foo', data)
_, entry = next(pkg.walk())
exc = Exception('test exception')
mocked_calculate_sha256.return_value = [exc]
with pytest.raises(quilt3.exceptions.PackageException) as excinfo:
pkg._fix_sha256()
mocked_calculate_sha256.assert_called_once_with([entry.physical_key], [len(data)])
assert entry.hash is None
assert excinfo.value.__cause__ == exc
@patch('quilt3.packages.calculate_sha256')
def test_fix_sha256(self, mocked_calculate_sha256):
data = b'Hello, World!'
pkg = Package()
pkg.set('foo', data)
_, entry = next(pkg.walk())
hash_ = object()
mocked_calculate_sha256.return_value = [hash_]
pkg._fix_sha256()
mocked_calculate_sha256.assert_called_once_with([entry.physical_key], [len(data)])
assert entry.hash == {'type': 'SHA256', 'value': hash_}
def test_resolve_hash_invalid_pkg_name(self):
with pytest.raises(QuiltException, match='Invalid package name'):
Package.resolve_hash('?', Mock(), Mock())
def _test_resolve_hash_without_pkg_name(self, hash_prefix, top_hash1):
msg = r"Calling resolve_hash\(\) without the 'name' parameter is deprecated."
with pytest.warns(RemovedInQuilt4Warning, match=msg):
assert Package.resolve_hash(LOCAL_REGISTRY, hash_prefix) == top_hash1
def test_resolve_hash(self):
pkg_name = 'Quilt/Test'
top_hash1 = 'top_hash11'
top_hash2 = 'top_hash22'
top_hash3 = 'top_hash13'
hash_prefix = 'top_hash1'
with pytest.raises(QuiltException, match='Found zero matches'):
Package.resolve_hash(pkg_name, LOCAL_REGISTRY, hash_prefix)
with patch('quilt3.Package.top_hash', top_hash1), \
patch('time.time', return_value=1):
Package().build(pkg_name)
with patch('quilt3.Package.top_hash', top_hash2), \
patch('time.time', return_value=2):
Package().build(pkg_name)
assert Package.resolve_hash(pkg_name, LOCAL_REGISTRY, hash_prefix) == top_hash1
self._test_resolve_hash_without_pkg_name(hash_prefix, top_hash1)
with patch('quilt3.Package.top_hash', top_hash3), \
patch('time.time', return_value=3):
Package().build(pkg_name)
with pytest.raises(QuiltException, match='Found multiple matches'):
Package.resolve_hash(pkg_name, LOCAL_REGISTRY, hash_prefix)
@patch('quilt3.Package._fix_sha256', wraps=quilt3.Package._fix_sha256)
@patch('quilt3.Package._build', wraps=quilt3.Package._build)
def test_workflow_validation_error(self, build_mock, fix_hashes):
self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')
pkg = Package().set('foo', DATA_DIR / 'foo.txt')
for method in (pkg.build, pkg.push):
with self.subTest(method=method):
with patch(
'quilt3.workflows.validate',
side_effect=Exception('test exception')
) as workflow_validate_mock:
with pytest.raises(Exception) as excinfo:
method('test/pkg', registry='s3://test-bucket')
assert excinfo.value is workflow_validate_mock.side_effect
workflow_validate_mock.assert_called_once()
assert not build_mock.mock_calls
assert not fix_hashes.mock_calls
assert pkg._workflow is None
@patch('quilt3.packages.copy_file_list')
@patch('quilt3.workflows.validate', return_value=mock.sentinel.returned_workflow)
@patch('quilt3.Package._calculate_top_hash', mock.MagicMock(return_value=mock.sentinel.top_hash))
@patch('quilt3.Package._set_commit_message', mock.MagicMock())
def test_workflow_validation(self, workflow_validate_mock, copy_file_list_mock):
registry = 's3://test-bucket'
pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url('s3://test-bucket'))
self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')
for method in (Package.build, Package.push):
with self.subTest(method=method):
with patch('quilt3.Package._push_manifest') as push_manifest_mock:
pkg = Package().set('foo', DATA_DIR / 'foo.txt')
method(pkg, 'test/pkg', registry)
workflow_validate_mock.assert_called_once_with(
registry=pkg_registry,
workflow=...,
meta={},
message=None,
)
assert pkg._workflow is mock.sentinel.returned_workflow
push_manifest_mock.assert_called_once()
workflow_validate_mock.reset_mock()
if method is Package.push:
copy_file_list_mock.assert_called_once()
copy_file_list_mock.reset_mock()
with self.subTest(method=method):
with patch('quilt3.Package._push_manifest') as push_manifest_mock:
pkg = Package().set('foo', DATA_DIR / 'foo.txt').set_meta(mock.sentinel.pkg_meta)
method(
pkg,
'test/pkg',
registry,
workflow=mock.sentinel.workflow,
message=mock.sentinel.message,
)
workflow_validate_mock.assert_called_once_with(
registry=pkg_registry,
workflow=mock.sentinel.workflow,
meta=mock.sentinel.pkg_meta,
message=mock.sentinel.message,
)
assert pkg._workflow is mock.sentinel.returned_workflow
push_manifest_mock.assert_called_once()
workflow_validate_mock.reset_mock()
if method is Package.push:
copy_file_list_mock.assert_called_once()
copy_file_list_mock.reset_mock()
@patch('quilt3.workflows.validate', mock.MagicMock(return_value=None))
def test_push_dest_fn_non_string(self):
pkg = Package().set('foo', DATA_DIR / 'foo.txt')
for val in (None, 42):
with self.subTest(value=val):
with pytest.raises(TypeError) as excinfo:
pkg.push('foo/bar', registry='s3://test-bucket',
dest=(lambda v: lambda *args, **kwargs: v)(val))
assert 'str is expected' in str(excinfo.value)
@patch('quilt3.workflows.validate', mock.MagicMock(return_value=None))
def test_push_dest_fn_non_supported_uri(self):
pkg = Package().set('foo', DATA_DIR / 'foo.txt')
for val in ('http://example.com', 'file:///bffd'):
with self.subTest(value=val):
with pytest.raises(quilt3.util.URLParseError):
pkg.push('foo/bar', registry='s3://test-bucket',
dest=(lambda v: lambda *args, **kwargs: v)(val))
@patch('quilt3.workflows.validate', mock.MagicMock(return_value=None))
def test_push_dest_fn_s3_uri_with_version_id(self):
pkg = Package().set('foo', DATA_DIR / 'foo.txt')
with pytest.raises(ValueError) as excinfo:
pkg.push('foo/bar', registry='s3://test-bucket', dest=lambda *args, **kwargs: 's3://bucket/ds?versionId=v')
assert 'URI must not include versionId' in str(excinfo.value)
@patch('quilt3.workflows.validate', mock.MagicMock(return_value=None))
@patch('quilt3.Package._calculate_top_hash', mock.MagicMock(return_value=mock.sentinel.top_hash))
def test_push_dest_fn(self):
pkg_name = 'foo/bar'
lk = 'foo'
pkg = Package().set(lk, DATA_DIR / 'foo.txt')
dest_bucket = 'new-bucket'
dest_key = 'new-key'
dest_fn = mock.MagicMock(return_value=f's3://{dest_bucket}/{dest_key}')
version = '1'
self.s3_stubber.add_response(
method='put_object',
service_response={
'VersionId': '1',
},
expected_params={
'Body': ANY,
'Bucket': dest_bucket,
'Key': dest_key,
}
)
push_manifest_mock = self.patch_s3_registry('push_manifest')
self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')
pkg.push(pkg_name, registry='s3://test-bucket', dest=dest_fn)
dest_fn.assert_called_once_with(lk, pkg[lk], mock.sentinel.top_hash)
push_manifest_mock.assert_called_once_with(pkg_name, mock.sentinel.top_hash, ANY)
assert Package.load(
BytesIO(push_manifest_mock.call_args[0][2])
)[lk].physical_key == PhysicalKey(dest_bucket, dest_key, version)
def test_package_dump_file_mode(self):
"""
Package.dump() works with both files opened in binary and text mode.
"""
meta = {'💩': '💩'}
pkg = Package().set_meta(meta)
for mode in 'bt':
with self.subTest(mode=mode):
fn = f'test-manifest-{mode}.jsonl'
with open(fn, f'w{mode}', **({'encoding': 'utf-8'} if mode == 't' else {})) as f:
pkg.dump(f)
with open(fn, encoding='utf-8') as f:
assert Package.load(f).meta == meta
def test_max_manifest_record_size(self):
with open(os.devnull, 'wb') as buf:
with mock.patch('quilt3.packages.MANIFEST_MAX_RECORD_SIZE', 1):
with pytest.raises(QuiltException) as excinfo:
Package().dump(buf)
assert 'Size of manifest record for package metadata' in str(excinfo.value)
with mock.patch('quilt3.packages.MANIFEST_MAX_RECORD_SIZE', 10_000):
with pytest.raises(QuiltException) as excinfo:
Package().set('foo', DATA_DIR / 'foo.txt', {'user_meta': 'x' * 10_000}).dump(buf)
assert "Size of manifest record for entry with logical key 'foo'" in str(excinfo.value)
with pytest.raises(QuiltException) as excinfo:
Package().set_dir('bar', DATA_DIR / 'nested', meta={'user_meta': 'x' * 10_000}).dump(buf)
assert "Size of manifest record for entry with logical key 'bar/'" in str(excinfo.value)
# This would fail if non-ASCII chars were encoded using escape sequences.
Package().set_meta({'a': '💩' * 2_000}).dump(buf)
class PackageTestV2(PackageTest):
default_registry_version = 2
S3PackageRegistryDefault = S3PackageRegistryV2
LocalPackageRegistryDefault = LocalPackageRegistryV2
def _test_resolve_hash_without_pkg_name(self, hash_prefix, top_hash1):
with pytest.raises(TypeError, match='Package name is required'):
assert Package.resolve_hash(LOCAL_REGISTRY, hash_prefix) == top_hash1
def local_manifest_timestamp_fixer(self, timestamp):
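        # Wrap push_manifest so the mtime of the manifest's parent path is pinned
        # to `timestamp`, keeping revision timestamps deterministic in tests.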
wrapped = self.LocalPackageRegistryDefault.push_manifest
def wrapper(pkg_registry, pkg_name, top_hash, manifest_data):
wrapped(pkg_registry, pkg_name, top_hash, manifest_data)
os.utime(pkg_registry._manifest_parent_pk(pkg_name, top_hash).path, (timestamp, timestamp))
return patch.object(self.LocalPackageRegistryDefault, 'push_manifest', wrapper)
def _test_list_remote_packages_setup_stubber(self, pkg_registry, *, pkg_names):
self.s3_stubber.add_response(
method='list_objects_v2',
service_response={
'CommonPrefixes': [
{'Prefix': pkg_registry.manifests_package_dir(pkg_name).path}
for pkg_name in pkg_names
]
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Prefix': pkg_registry.manifests_global_dir.path,
'Delimiter': '/',
}
)
def _test_remote_package_delete_setup_stubber(self, pkg_registry, pkg_name, *, pointers):
top_hashes = (
'e99b760a05539460ac0a7349abb8f476e8c75282a38845fa828f8a5d28374303',
'20de5433549a4db332a11d8d64b934a82bdea8f144b4aecd901e7d4134f8e733',
)
self.s3_stubber.add_response(
method='list_objects_v2',
service_response={
'Contents': [
{
'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,
'Size': 64,
}
for top_hash in top_hashes
]
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Prefix': pkg_registry.manifests_package_dir(pkg_name).path,
}
)
for top_hash in top_hashes:
self.s3_stubber.add_response(
method='delete_object',
service_response={},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,
}
)
super()._test_remote_package_delete_setup_stubber(pkg_registry, pkg_name, pointers=pointers)
def _test_remote_revision_delete_setup_stubber(self, pkg_registry, pkg_name, *, top_hashes, latest, remove,
new_latest):
self.s3_stubber.add_response(
method='delete_object',
service_response={},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.manifest_pk(pkg_name, remove).path,
}
)
self.setup_s3_stubber_resolve_pointer(pkg_registry, pkg_name, pointer='latest', top_hash=latest)
if latest == remove:
self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer='latest')
self.s3_stubber.add_response(
method='list_objects_v2',
service_response={
'Contents': [
{
'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,
'Size': 64,
'LastModified': datetime.fromtimestamp(timestamp),
}
for top_hash, timestamp in top_hashes.items() if top_hash != remove
]
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Prefix': pkg_registry.manifests_package_dir(pkg_name).path,
}
)
if new_latest:
self.s3_stubber.add_response(
method='put_object',
service_response={},
expected_params={
'Body': new_latest.encode(),
'Bucket': pkg_registry.root.bucket,
'Key': pkg_registry.pointer_latest_pk(pkg_name).path,
}
)
self.s3_stubber.add_response(
method='list_objects_v2',
service_response={
'Contents': [],
},
expected_params={
'Bucket': pkg_registry.root.bucket,
'Prefix': pkg_registry.pointers_dir(pkg_name).path,
}
)
# The following tests were moved out of the PackageTest class to enable parametrization.
# see (https://docs.pytest.org/en/latest/unittest.html#pytest-features-in-unittest-testcase-subclasses)
@pytest.mark.parametrize(
'target_dir, update_policy, expected_one_byte, expected_two_byte, expected_three_byte, expected_keys',
[
('/', None, b'one', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),
('/', 'incoming', b'one', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),
('/', 'existing', b'1', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),
('', 'incoming', b'one', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),
('', 'existing', b'1', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),
('sub/', 'incoming', b'one', b'two', b'three', {'one.txt', 'sub'}),
('sub/', 'existing', b'one', b'2', b'3', {'one.txt', 'sub'}),
('new-sub/', 'incoming', b'one', b'two', b'three', {'one.txt', 'sub', 'new-sub'}),
('new-sub/', 'existing', b'one', b'two', b'three', {'one.txt', 'sub', 'new-sub'}),
pytest.param('/', 'bad_policy', b'1', b'2', b'3', set(), marks=pytest.mark.xfail(raises=ValueError)),
]
)
def test_set_dir_update_policy(
target_dir: str,
update_policy: str,
expected_one_byte: bytes,
expected_two_byte: bytes,
expected_three_byte: bytes,
expected_keys: set
):
"""Verify building a package with update policy. """
nested_dir = DATA_DIR / 'nested'
pkg = Package()
pkg.set_dir("/", nested_dir, meta={'name': 'test_meta'})
assert set(pkg.keys()) == {'one.txt', 'sub'}
assert set(pkg['sub'].keys()) == {'two.txt', 'three.txt'}
assert pkg.meta == {'name': 'test_meta'}
nested_dir_2 = DATA_DIR / 'nested2'
if update_policy:
pkg.set_dir(target_dir, nested_dir_2, update_policy=update_policy)
else:
pkg.set_dir(target_dir, nested_dir_2)
assert set(pkg.keys()) == expected_keys
target_dir = target_dir.strip("/")
if target_dir:
assert pkg['one.txt'].get_bytes() == b'1'
assert set(pkg[target_dir].keys()) == {'one.txt', 'two.txt', 'three.txt'}
assert pkg[target_dir + '/one.txt'].get_bytes() == expected_one_byte
assert pkg[target_dir + '/two.txt'].get_bytes() == expected_two_byte
assert pkg[target_dir + '/three.txt'].get_bytes() == expected_three_byte
else:
assert pkg['one.txt'].get_bytes() == expected_one_byte
assert pkg['two.txt'].get_bytes() == expected_two_byte
assert pkg['three.txt'].get_bytes() == expected_three_byte
assert set(pkg['sub'].keys()) == {'two.txt', 'three.txt'}
@pytest.mark.parametrize(
'update_policy, expected_a_url, expected_xy_url',
[
('existing', 's3://bucket/foo/a.txt?versionId=xyz', 's3://bucket/foo/x/y.txt?versionId=null'),
('incoming', 's3://bucket/bar/a.txt?versionId=abc', 's3://bucket/bar/x/y.txt?versionId=null'),
(None, 's3://bucket/bar/a.txt?versionId=abc', 's3://bucket/bar/x/y.txt?versionId=null')
]
)
def test_set_dir_update_policy_s3(update_policy, expected_a_url, expected_xy_url):
with patch('quilt3.packages.list_object_versions') as list_object_versions_mock:
list_object_versions_mock.return_value = (
[
dict(Key='foo/a.txt', VersionId='xyz', IsLatest=True, Size=10),
dict(Key='foo/b.txt', VersionId='byc', IsLatest=True, Size=10),
dict(Key='foo/x/y.txt', VersionId='null', IsLatest=True, Size=10),
dict(Key='foo/z.txt', VersionId='123', IsLatest=False, Size=10),
],
[]
)
pkg = Package()
pkg.set_dir('', 's3://bucket/foo/', meta={'name': 'test_meta'})
assert 'c.txt' not in pkg.keys()
assert pkg['a.txt'].get() == 's3://bucket/foo/a.txt?versionId=xyz'
assert pkg['b.txt'].get() == 's3://bucket/foo/b.txt?versionId=byc'
assert pkg['x/y.txt'].get() == 's3://bucket/foo/x/y.txt?versionId=null'
list_object_versions_mock.assert_called_once_with('bucket', 'foo/')
list_object_versions_mock.return_value = (
[
dict(Key='bar/a.txt', VersionId='abc', IsLatest=True, Size=10),
dict(Key='bar/c.txt', VersionId='cyb', IsLatest=True, Size=10),
dict(Key='bar/x/y.txt', VersionId='null', IsLatest=True, Size=10),
dict(Key='bar/z.txt', VersionId='123', IsLatest=True, Size=10),
],
[]
)
if update_policy:
pkg.set_dir('', 's3://bucket/bar', update_policy=update_policy)
else:
pkg.set_dir('', 's3://bucket/bar')
assert pkg['a.txt'].get() == expected_a_url
assert pkg['b.txt'].get() == 's3://bucket/foo/b.txt?versionId=byc'
assert pkg['c.txt'].get() == 's3://bucket/bar/c.txt?versionId=cyb'
assert pkg['x/y.txt'].get() == expected_xy_url
assert pkg['z.txt'].get() == 's3://bucket/bar/z.txt?versionId=123'
assert list_object_versions_mock.call_count == 2
list_object_versions_mock.assert_has_calls([call('bucket', 'foo/'), call('bucket', 'bar/')])
|
apache-2.0
|
QuLogic/cartopy
|
examples/lines_and_polygons/hurricane_katrina.py
|
2
|
3271
|
"""
Hurricane Katrina
-----------------
This example uses the power of Shapely to illustrate states that are likely to
have been significantly impacted by Hurricane Katrina.
"""
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import shapely.geometry as sgeom
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
def sample_data():
"""
Return a list of latitudes and a list of longitudes (lons, lats)
for Hurricane Katrina (2005).
The data was originally sourced from the HURDAT2 dataset from AOML/NOAA:
https://www.aoml.noaa.gov/hrd/hurdat/newhurdat-all.html on 14th Dec 2012.
"""
lons = [-75.1, -75.7, -76.2, -76.5, -76.9, -77.7, -78.4, -79.0,
-79.6, -80.1, -80.3, -81.3, -82.0, -82.6, -83.3, -84.0,
-84.7, -85.3, -85.9, -86.7, -87.7, -88.6, -89.2, -89.6,
-89.6, -89.6, -89.6, -89.6, -89.1, -88.6, -88.0, -87.0,
-85.3, -82.9]
lats = [23.1, 23.4, 23.8, 24.5, 25.4, 26.0, 26.1, 26.2, 26.2, 26.0,
25.9, 25.4, 25.1, 24.9, 24.6, 24.4, 24.4, 24.5, 24.8, 25.2,
25.7, 26.3, 27.2, 28.2, 29.3, 29.5, 30.2, 31.1, 32.6, 34.1,
35.6, 37.0, 38.6, 40.1]
return lons, lats
def main():
fig = plt.figure()
# to get the effect of having just the states without a map "background"
# turn off the background patch and axes frame
ax = fig.add_axes([0, 0, 1, 1], projection=ccrs.LambertConformal(),
frameon=False)
ax.patch.set_visible(False)
ax.set_extent([-125, -66.5, 20, 50], ccrs.Geodetic())
shapename = 'admin_1_states_provinces_lakes'
states_shp = shpreader.natural_earth(resolution='110m',
category='cultural', name=shapename)
lons, lats = sample_data()
ax.set_title('US States which intersect the track of '
'Hurricane Katrina (2005)')
# turn the lons and lats into a shapely LineString
track = sgeom.LineString(zip(lons, lats))
# buffer the linestring by two degrees (note: this is a non-physical
# distance)
track_buffer = track.buffer(2)
def colorize_state(geometry):
facecolor = (0.9375, 0.9375, 0.859375)
if geometry.intersects(track):
facecolor = 'red'
elif geometry.intersects(track_buffer):
facecolor = '#FF7E00'
return {'facecolor': facecolor, 'edgecolor': 'black'}
ax.add_geometries(
shpreader.Reader(states_shp).geometries(),
ccrs.PlateCarree(),
styler=colorize_state)
ax.add_geometries([track_buffer], ccrs.PlateCarree(),
facecolor='#C8A2C8', alpha=0.5)
ax.add_geometries([track], ccrs.PlateCarree(),
facecolor='none', edgecolor='k')
# make two proxy artists to add to a legend
direct_hit = mpatches.Rectangle((0, 0), 1, 1, facecolor="red")
within_2_deg = mpatches.Rectangle((0, 0), 1, 1, facecolor="#FF7E00")
labels = ['State directly intersects\nwith track',
'State is within \n2 degrees of track']
ax.legend([direct_hit, within_2_deg], labels,
loc='lower left', bbox_to_anchor=(0.025, -0.1), fancybox=True)
plt.show()
if __name__ == '__main__':
main()
|
lgpl-3.0
|
shangwuhencc/scikit-learn
|
sklearn/neighbors/tests/test_ball_tree.py
|
159
|
10196
|
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
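    # Reference k-NN: compute the full pairwise distance matrix and take the k
    # smallest distances for each query point in Y.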
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
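    # Pairwise Euclidean distances between every row of Y and every row of X,
    # via broadcasting; d has shape (len(Y), len(X)).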
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
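    # Broadcasting gives every row the same 0..n_pts-1 index array.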
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
|
bsd-3-clause
|
kai5263499/networkx
|
examples/graph/napoleon_russian_campaign.py
|
44
|
3216
|
#!/usr/bin/env python
"""
Minard's data from Napoleon's 1812-1813 Russian Campaign.
http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import string
import networkx as nx
def minard_graph():
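    # Each data row is: longitude, latitude, army size, direction
    # (A = advancing, R = retreating), and group number.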
data1="""\
24.0,54.9,340000,A,1
24.5,55.0,340000,A,1
25.5,54.5,340000,A,1
26.0,54.7,320000,A,1
27.0,54.8,300000,A,1
28.0,54.9,280000,A,1
28.5,55.0,240000,A,1
29.0,55.1,210000,A,1
30.0,55.2,180000,A,1
30.3,55.3,175000,A,1
32.0,54.8,145000,A,1
33.2,54.9,140000,A,1
34.4,55.5,127100,A,1
35.5,55.4,100000,A,1
36.0,55.5,100000,A,1
37.6,55.8,100000,A,1
37.7,55.7,100000,R,1
37.5,55.7,98000,R,1
37.0,55.0,97000,R,1
36.8,55.0,96000,R,1
35.4,55.3,87000,R,1
34.3,55.2,55000,R,1
33.3,54.8,37000,R,1
32.0,54.6,24000,R,1
30.4,54.4,20000,R,1
29.2,54.3,20000,R,1
28.5,54.2,20000,R,1
28.3,54.3,20000,R,1
27.5,54.5,20000,R,1
26.8,54.3,12000,R,1
26.4,54.4,14000,R,1
25.0,54.4,8000,R,1
24.4,54.4,4000,R,1
24.2,54.4,4000,R,1
24.1,54.4,4000,R,1"""
data2="""\
24.0,55.1,60000,A,2
24.5,55.2,60000,A,2
25.5,54.7,60000,A,2
26.6,55.7,40000,A,2
27.4,55.6,33000,A,2
28.7,55.5,33000,R,2
29.2,54.2,30000,R,2
28.5,54.1,30000,R,2
28.3,54.2,28000,R,2"""
data3="""\
24.0,55.2,22000,A,3
24.5,55.3,22000,A,3
24.6,55.8,6000,A,3
24.6,55.8,6000,R,3
24.2,54.4,6000,R,3
24.1,54.4,6000,R,3"""
cities="""\
24.0,55.0,Kowno
25.3,54.7,Wilna
26.4,54.4,Smorgoni
26.8,54.3,Moiodexno
27.7,55.2,Gloubokoe
27.6,53.9,Minsk
28.5,54.3,Studienska
28.7,55.5,Polotzk
29.2,54.4,Bobr
30.2,55.3,Witebsk
30.4,54.5,Orscha
30.4,53.9,Mohilow
32.0,54.8,Smolensk
33.2,54.9,Dorogobouge
34.3,55.2,Wixma
34.4,55.5,Chjat
36.0,55.5,Mojaisk
37.6,55.8,Moscou
36.6,55.3,Tarantino
36.5,55.0,Malo-Jarosewii"""
c={}
for line in cities.split('\n'):
x,y,name=line.split(',')
c[name]=(float(x),float(y))
g=[]
for data in [data1,data2,data3]:
G=nx.Graph()
i=0
G.pos={} # location
G.pop={} # size
last=None
for line in data.split('\n'):
x,y,p,r,n=line.split(',')
G.pos[i]=(float(x),float(y))
G.pop[i]=int(p)
if last is None:
last=i
else:
G.add_edge(i,last,{r:int(n)})
last=i
i=i+1
g.append(G)
return g,c
if __name__ == "__main__":
(g,city)=minard_graph()
try:
import matplotlib.pyplot as plt
plt.figure(1,figsize=(11,5))
plt.clf()
colors=['b','g','r']
for G in g:
c=colors.pop(0)
node_size=[int(G.pop[n]/300.0) for n in G]
nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k')
for c in city:
x,y=city[c]
plt.text(x,y+0.1,c)
plt.savefig("napoleon_russian_campaign.png")
except ImportError:
pass
|
bsd-3-clause
|
alexlib/openpiv-python
|
openpiv/test/test_windef.py
|
2
|
5243
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 14:33:21 2019
@author: Theo
"""
import numpy as np
from openpiv import windef
from openpiv.test import test_process
from openpiv import preprocess
import pathlib
import os
import matplotlib.pyplot as plt
frame_a, frame_b = test_process.create_pair(image_size=256)
shift_u, shift_v, threshold = test_process.shift_u, test_process.shift_v, \
test_process.threshold
# These tests are created only to test the displacement evaluation of the
# functions; the validation methods are not tested here and are therefore
# disabled.
settings = windef.Settings()
settings.windowsizes = (64,)
settings.overlap = (32,)
settings.num_iterations = 1
settings.correlation_method = 'circular'
settings.sig2noise_method = 'peak2peak'
settings.subpixel_method = 'gaussian'
settings.sig2noise_mask = 2
# circular cross correlation
def test_first_pass_circ():
""" test of the first pass """
x, y, u, v, s2n = windef.first_pass(
frame_a,
frame_b,
settings
)
print("\n", x, y, u, v, s2n)
assert np.mean(np.abs(u - shift_u)) < threshold
assert np.mean(np.abs(v - shift_v)) < threshold
def test_multi_pass_circ():
""" test fot the multipass """
settings.windowsizes = (64, 64, 16)
settings.overlap = (32, 32, 8)
settings.num_iterations = 2
settings.interpolation_order = 3
settings.validation_first_pass = True
settings.sig2noise_validate = False
    # settings.show_all_plots = True
x, y, u, v, s2n = windef.first_pass(
frame_a,
frame_b,
settings,
)
print("first pass\n")
print("\n", x, y, u, v, s2n)
assert np.allclose(u, shift_u, atol = threshold)
assert np.allclose(v, shift_v, atol = threshold)
if settings.image_mask:
image_mask = np.logical_and(mask_a, mask_b)
mask_coords = preprocess.mask_coordinates(image_mask)
# mark those points on the grid of PIV inside the mask
grid_mask = preprocess.prepare_mask_on_grid(x,y,mask_coords)
# mask the velocity
u = np.ma.masked_array(u, mask=grid_mask)
v = np.ma.masked_array(v, mask=grid_mask)
else:
mask_coords = []
u = np.ma.masked_array(u, mask=np.ma.nomask)
v = np.ma.masked_array(v, mask=np.ma.nomask)
for i in range(1,settings.num_iterations):
x, y, u, v, s2n, _ = windef.multipass_img_deform(
frame_a,
frame_b,
i,
x,
y,
u,
v,
settings
)
print(f"Pass {i}\n")
print(x)
print(y)
print(u)
print(v)
print(s2n)
assert np.mean(np.abs(u - shift_u)) < threshold
assert np.mean(np.abs(v - shift_v)) < threshold
    # The second condition checks that the multipass is done; it allows for
    # a little numerical inaccuracy.
# linear cross correlation
def test_first_pass_lin():
""" test of the first pass """
settings.correlation_method = 'linear'
x, y, u, v, s2n = windef.first_pass(
frame_a,
frame_b,
settings,
)
print("\n", x, y, u, v, s2n)
assert np.mean(np.abs(u - shift_u)) < threshold
assert np.mean(np.abs(v - shift_v)) < threshold
def test_invert_and_piv():
""" Test windef.piv with invert option """
settings = windef.Settings()
'Data related settings'
# Folder with the images to process
settings.filepath_images = pathlib.Path(__file__).parent / '../examples/test1'
settings.save_path = '.'
# Root name of the output Folder for Result Files
settings.save_folder_suffix = 'test'
# Format and Image Sequence
settings.frame_pattern_a = 'exp1_001_a.bmp'
settings.frame_pattern_b = 'exp1_001_b.bmp'
settings.num_iterations = 1
settings.show_plot = False
settings.scale_plot = 100
settings.show_all_plots = False
settings.invert = True
windef.piv(settings)
def test_multi_pass_lin():
""" test fot the multipass """
settings.windowsizes = (64, 32, 16)
settings.overlap = (32, 16, 8)
settings.num_iterations = 1
settings.sig2noise_validate = True
settings.correlation_method = 'linear'
settings.normalized_correlation = True
settings.sig2noise_threshold = 1.0 # note the value for linear/normalized
x, y, u, v, s2n = windef.first_pass(
frame_a,
frame_b,
settings,
)
print("\n", x, y, u, v, s2n)
assert np.mean(np.abs(u - shift_u)) < threshold
assert np.mean(np.abs(v - shift_v)) < threshold
mask_coords = []
u = np.ma.masked_array(u, mask=np.ma.nomask)
v = np.ma.masked_array(v, mask=np.ma.nomask)
for i in range(1, settings.num_iterations):
x, y, u, v, s2n, _ = windef.multipass_img_deform(
frame_a,
frame_b,
i,
x,
y,
u,
v,
settings,
)
print(f"Iteration {i}")
print("\n", x, y, u, v, s2n)
assert np.allclose(u, shift_u, atol=threshold)
assert np.allclose(v, shift_v, atol=threshold)
    # The second condition checks that the multipass is done; it allows for
    # a little numerical inaccuracy.
|
gpl-3.0
|
WarrenWeckesser/scikits-image
|
skimage/viewer/utils/core.py
|
18
|
6556
|
import warnings
import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warnings.warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
"""Initialize QAppliction.
The QApplication needs to be initialized before creating any QWidgets
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
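# A minimal usage sketch for RequiredAttr (illustrative names, not part of the
# original module): the descriptor is declared on a class and must be assigned
# on an instance before it is read back.
def _required_attr_example():
    """Illustrative only: shows the set-before-get contract of RequiredAttr."""
    class _Widget(object):
        plugin = RequiredAttr()
    widget = _Widget()
    widget.plugin = object()   # assign first ...
    return widget.plugin       # ... then reads return the stored value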
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap, which doesn't
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
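# A minimal construction sketch for LinearColormap (illustrative values only):
# each channel maps positions in [0, 1] to an intensity, so the dictionary
# below fades linearly from black to pure red.
_example_red_fade = LinearColormap('red_fade', {
    'red':   [(0.0, 0.0), (1.0, 1.0)],
    'green': [(0.0, 0.0), (1.0, 0.0)],
    'blue':  [(0.0, 0.0), (1.0, 0.0)],
})
# e.g. ``ax.imshow(img, cmap=_example_red_fade)`` accepts it like any other
# matplotlib colormap.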
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
    subplot_kw : dict
        Keyword arguments passed to ``matplotlib.figure.Figure.add_subplot``.
    fig_kw : dict
        Keyword arguments passed to ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
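# A minimal usage sketch for ``figimage`` (illustrative helper, not part of the
# original API): a QApplication must exist before the Qt canvas is created.
def _demo_figimage(image, scale=1):
    """Illustrative only: display ``image`` in a figure that wraps it tightly."""
    init_qtapp()                  # the Qt-backed FigureCanvas needs a QApplication
    fig, ax = figimage(image, scale=scale)
    fig.canvas.show()             # FigureCanvas is a QWidget, so it can be shown
    start_qtapp()
    return fig, ax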
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
This sets the image plot's array and updates its shape appropriately
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
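# A minimal usage sketch for ``update_axes_image`` (hypothetical array names):
# reuse the AxesImage created by ``figimage`` instead of re-plotting.
def _demo_update_image(image, new_image):
    """Illustrative only: swap the displayed array and redraw the canvas."""
    fig, ax = figimage(image)
    update_axes_image(ax.images[0], new_image)   # ax.images[0] is the AxesImage
    fig.canvas.draw()
    return fig, ax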
|
bsd-3-clause
|
toobaz/pandas
|
pandas/core/computation/eval.py
|
2
|
12438
|
#!/usr/bin/env python
"""
Top level ``eval`` module.
"""
import tokenize
import warnings
from pandas.util._validators import validate_bool_kwarg
from pandas.core.computation.engines import _engines
from pandas.core.computation.scope import _ensure_scope
from pandas.io.formats.printing import pprint_thing
def _check_engine(engine):
"""
Make sure a valid engine is passed.
Parameters
----------
engine : str
Raises
------
KeyError
* If an invalid engine is passed
ImportError
* If numexpr was requested but doesn't exist
Returns
-------
string engine
"""
from pandas.core.computation.check import _NUMEXPR_INSTALLED
if engine is None:
if _NUMEXPR_INSTALLED:
engine = "numexpr"
else:
engine = "python"
if engine not in _engines:
valid = list(_engines.keys())
raise KeyError(
"Invalid engine {engine!r} passed, valid engines are"
" {valid}".format(engine=engine, valid=valid)
)
# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == "numexpr":
if not _NUMEXPR_INSTALLED:
raise ImportError(
"'numexpr' is not installed or an "
"unsupported version. Cannot use "
"engine='numexpr' for query/eval "
"if 'numexpr' is not installed"
)
return engine
def _check_parser(parser):
"""
Make sure a valid parser is passed.
Parameters
----------
parser : str
Raises
------
KeyError
* If an invalid parser is passed
"""
from pandas.core.computation.expr import _parsers
if parser not in _parsers:
raise KeyError(
"Invalid parser {parser!r} passed, valid parsers are"
" {valid}".format(parser=parser, valid=_parsers.keys())
)
def _check_resolvers(resolvers):
if resolvers is not None:
for resolver in resolvers:
if not hasattr(resolver, "__getitem__"):
name = type(resolver).__name__
raise TypeError(
"Resolver of type {name!r} does not implement "
"the __getitem__ method".format(name=name)
)
def _check_expression(expr):
"""
Make sure an expression is not an empty string
Parameters
----------
expr : object
An object that can be converted to a string
Raises
------
ValueError
* If expr is an empty string
"""
if not expr:
raise ValueError("expr cannot be an empty string")
def _convert_expression(expr):
"""
Convert an object to an expression.
    This function converts an object to an expression (a unicode string) and
checks to make sure it isn't empty after conversion. This is used to
convert operators to their string representation for recursive calls to
:func:`~pandas.eval`.
Parameters
----------
expr : object
The object to be converted to a string.
Returns
-------
s : unicode
The string representation of an object.
Raises
------
ValueError
* If the expression is empty.
"""
s = pprint_thing(expr)
_check_expression(s)
return s
def _check_for_locals(expr, stack_level, parser):
from pandas.core.computation.expr import tokenize_string
at_top_of_stack = stack_level == 0
not_pandas_parser = parser != "pandas"
if not_pandas_parser:
msg = "The '@' prefix is only supported by the pandas parser"
elif at_top_of_stack:
msg = (
"The '@' prefix is not allowed in "
"top-level eval calls, \nplease refer to "
"your variables by name without the '@' "
"prefix"
)
if at_top_of_stack or not_pandas_parser:
for toknum, tokval in tokenize_string(expr):
if toknum == tokenize.OP and tokval == "@":
raise SyntaxError(msg)
def eval(
expr,
parser="pandas",
engine=None,
truediv=True,
local_dict=None,
global_dict=None,
resolvers=(),
level=0,
target=None,
inplace=False,
):
"""
Evaluate a Python expression as a string using various backends.
The following arithmetic operations are supported: ``+``, ``-``, ``*``,
``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
:keyword:`or`, and :keyword:`not` with the same semantics as the
corresponding bitwise operators. :class:`~pandas.Series` and
:class:`~pandas.DataFrame` objects are supported and behave as they would
with plain ol' Python evaluation.
Parameters
----------
expr : str or unicode
The expression to evaluate. This string cannot contain any Python
`statements
<https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.
parser : string, default 'pandas', {'pandas', 'python'}
The parser to use to construct the syntax tree from the expression. The
        default of ``'pandas'`` parses code slightly differently from standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
engine : string or None, default 'numexpr', {'python', 'numexpr'}
The engine used to evaluate the expression. Supported engines are
- None : tries to use ``numexpr``, falls back to ``python``
- ``'numexpr'``: This default engine evaluates pandas objects using
numexpr for large speed ups in complex expressions
with large frames.
- ``'python'``: Performs operations as if you had ``eval``'d in top
level python. This engine is generally not that useful.
More backends may be available in the future.
truediv : bool, optional
Whether to use true division, like in Python >= 3
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
global_dict : dict or None, optional
A dictionary of global variables, taken from globals() by default.
resolvers : list of dict-like or None, optional
A list of objects implementing the ``__getitem__`` special method that
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~DataFrame.query` method to inject the
``DataFrame.index`` and ``DataFrame.columns``
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
The number of prior stack frames to traverse and add to the current
scope. Most users will **not** need to change this parameter.
target : object, optional, default None
This is the target object for assignment. It is used when there is
variable assignment in the expression. If so, then `target` must
support item assignment with string keys, and if a copy is being
returned, it must also support `.copy()`.
inplace : bool, default False
If `target` is provided, and the expression mutates `target`, whether
to modify `target` inplace. Otherwise, return a copy of `target` with
the mutation.
Returns
-------
ndarray, numeric scalar, DataFrame, Series
Raises
------
ValueError
There are many instances where such an error can be raised:
- `target=None`, but the expression is multiline.
        - The expression is multiline, but not all of them have item assignment.
An example of such an arrangement is this:
a = b + 1
a + 2
Here, there are expressions on different lines, making it multiline,
but the last line has no variable assigned to the output of `a + 2`.
- `inplace=True`, but the expression is missing item assignment.
- Item assignment is provided, but the `target` does not support
string item assignment.
- Item assignment is provided and `inplace=False`, but the `target`
does not support the `.copy()` method
See Also
--------
DataFrame.query
DataFrame.eval
Notes
-----
    The ``dtype`` of any objects involved in an arithmetic ``%`` operation is
    recursively cast to ``float64``.
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
"""
from pandas.core.computation.expr import Expr
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(expr, str):
_check_expression(expr)
exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""]
else:
exprs = [expr]
multi_line = len(exprs) > 1
if multi_line and target is None:
raise ValueError(
"multi-line expressions are only valid in the "
"context of data, use DataFrame.eval"
)
ret = None
first_expr = True
target_modified = False
for expr in exprs:
expr = _convert_expression(expr)
engine = _check_engine(engine)
_check_parser(parser)
_check_resolvers(resolvers)
_check_for_locals(expr, level, parser)
# get our (possibly passed-in) scope
env = _ensure_scope(
level + 1,
global_dict=global_dict,
local_dict=local_dict,
resolvers=resolvers,
target=target,
)
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv)
# construct the engine and evaluate the parsed expression
eng = _engines[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()
if parsed_expr.assigner is None:
if multi_line:
raise ValueError(
"Multi-line expressions are only valid"
" if all expressions contain an assignment"
)
elif inplace:
raise ValueError("Cannot operate inplace " "if there is no assignment")
# assign if needed
assigner = parsed_expr.assigner
if env.target is not None and assigner is not None:
target_modified = True
# if returning a copy, copy only on the first assignment
if not inplace and first_expr:
try:
target = env.target.copy()
except AttributeError:
raise ValueError("Cannot return a copy of the target")
else:
target = env.target
# TypeError is most commonly raised (e.g. int, list), but you
# get IndexError if you try to do this assignment on np.ndarray.
# we will ignore numpy warnings here; e.g. if trying
# to use a non-numeric indexer
try:
with warnings.catch_warnings(record=True):
# TODO: Filter the warnings we actually care about here.
target[assigner] = ret
except (TypeError, IndexError):
raise ValueError("Cannot assign expression output to target")
if not resolvers:
resolvers = ({assigner: ret},)
else:
                # existing resolver needs to be updated to handle
# case of mutating existing column in copy
for resolver in resolvers:
if assigner in resolver:
resolver[assigner] = ret
break
else:
resolvers += ({assigner: ret},)
ret = None
first_expr = False
# We want to exclude `inplace=None` as being False.
if inplace is False:
return target if target_modified else ret
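# A minimal usage sketch (illustrative names only; this function is normally
# reached as ``pandas.eval``):
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
#     a, b = df["a"], df["b"]
#     pd.eval("a + b")                    # element-wise sum, resolved from the caller's scope
#     pd.eval("c = a + b", target=df)     # returns a copy of df with a new column "c"
#     pd.eval("c = a + b", target=df, inplace=True)   # mutates df and returns None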
|
bsd-3-clause
|
syazdan25/SE17-Project
|
Pform3.py
|
1
|
3882
|
from flask import Flask, render_template, flash, request
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
import nltk
import numpy
from nltk.classify import SklearnClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.svm import SVC
import webbrowser
# App config.
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
class ReusableForm(Form):
name = TextField('Name:', validators=[validators.required()])
@app.route("/", methods=['GET', 'POST'])
def hello():
print ("hello")
f = open("Training_amazon_data.txt")
pos_tweets = list()
neg_tweets = list()
for line in f:
words = line.split("\t")
if words[1] == '0\n' or words[1] == '0':
neg_tweets.append(words)
else:
pos_tweets.append(words)
f.close()
tweets = []
for (words, sentiment) in pos_tweets + neg_tweets:
words_filtered = [e.lower() for e in words.split() if len(e) >= 3]
tweets.append((words_filtered, sentiment))
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
word_features = get_word_features(get_words_in_tweets(tweets))
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
training_set = nltk.classify.apply_features(extract_features, tweets)
#classifie = nltk.NaiveBayesClassifier.train(training_set)
classifie = SklearnClassifier(BernoulliNB()).train(training_set)
form = ReusableForm(request.form)
print (form.errors)
if request.method == 'POST':
name=request.form['name']
file = open("Testing_amazon_data.txt")
resfile = open("result.txt", "w")
predicted = numpy.array([]);
actual = numpy.array([]);
index = 0
for line in file:
label = classifie.classify(extract_features(line.split()))
words = line.split("\t")
actual = numpy.insert(actual, index, int(words[1]))
predicted = numpy.insert(predicted, index, int(label))
index = index + 1
resfile.write(line)
resfile.write(label)
file.close()
f = open("Stats.txt", "w")
confusion = actual - predicted
FP = numpy.count_nonzero(confusion==-1)
FN = numpy.count_nonzero(confusion==1)
print(FP)
#print(FN)
        Accuracy = numpy.count_nonzero(confusion==0)/(numpy.count_nonzero(confusion==0) + FP + FN)
        print (Accuracy)
        f.write(str(Accuracy))
        f.write("\n")
        f.write(str(FP))
        f.write("\n")
        f.write(str(FN))
        f.close()
#if
#TP =
#TN
#if (classifie.classify(extract_features(name.split())) == '1'):
# review = 'Positive'
# else:
# review = 'Negative'
name = classifie.classify(extract_features(name.split()))
print (name)
if form.validate():
# Save the comment here.
flash(name)
else:
flash('Error: All the form fields are required. ')
return render_template('analysis.html', form=form)
if __name__ == "__main__":
url = 'http://127.0.0.1:5000'
webbrowser.open_new(url)
app.run()
|
gpl-3.0
|
ChanderG/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
132
|
25860
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
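# The relabelling trick in ``test_k_means_new_centers`` above maps each cluster
# id to the index of its first occurrence, e.g. with illustrative values:
#
#     raw = np.array([2, 0, 1, 0, 0, 1])
#     np.unique(raw, return_index=True)[1]        # array([1, 2, 0]), first occurrences
#     np.unique(raw, return_index=True)[1][raw]   # array([0, 1, 2, 1, 1, 2])
#
# so the comparison is independent of how KMeans happens to number the clusters
# (here the first three samples belong to three different clusters).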
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
    # Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
|
bsd-3-clause
|
jjx02230808/project0223
|
examples/calibration/plot_calibration_multiclass.py
|
272
|
6972
|
"""
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
|
bsd-3-clause
|
Jailander/COSMOS
|
kriging_exploration/src/kriging_exploration/canvas.py
|
1
|
7053
|
#import urllib
#import math
import numpy as np
import cv2
from matplotlib import colors as mcolors
from map_coords import MapCoords
class ViewerCanvas(object):
def __init__(self, shape, centre, res):
self.size = shape[0]
self.res = res
self.centre = centre
self.image = np.zeros(shape, dtype=np.uint8)
self.shape = shape
def _coord2pix(self, point):
dnorth = ((self.centre.northing- point.northing)/self.res) + (self.size/2)
deast = ((point.easting - self.centre.easting)/self.res) + (self.size/2)
return deast, dnorth
def _pix2coord(self, x, y):
xcord = (x - (self.size/2))*self.res
ycord = -(y - (self.size/2))*self.res
click_coord = self.centre._get_rel_point(xcord,ycord)
return click_coord
def _latlong2pix(self, lat, lon):
point = MapCoords(lat, lon)
deast, dnorth = self._coord2pix(point)
return deast, dnorth
def clear_image(self):
self.image = np.zeros(self.shape, dtype=np.uint8)
def draw_coordinate(self, coord, colour, size=6, thickness=2, alpha=128):
mx, my =self._coord2pix(coord)
b = [int(255*x) for x in mcolors.hex2color(mcolors.cnames[colour])]
b = b[::-1]
b.append(alpha)
cv2.circle(self.image, (int(mx), int(my)), size, b, thickness)
def draw_waypoints(self, waypoints, colour,thickness=2):
for i in waypoints:
mx0, my0 = self._coord2pix(i.coord)
cv2.circle(self.image, (int(mx0), int(my0)), 2, colour, thickness=thickness)
def draw_plan(self, waypoints, colour, thickness=2, alpha=128):
b = [int(255*x) for x in mcolors.hex2color(mcolors.cnames[colour])]
b = b[::-1]
b.append(alpha)
for i in range(0, len(waypoints)-1):
mx0, my0 =self._coord2pix(waypoints[i].coord)
mx1, my1 =self._coord2pix(waypoints[i+1].coord)
cv2.line(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), b, thickness=thickness)
def draw_grid(self, grid, cell_size, colour, thickness=2):
nx = len(grid)-1
ny = len(grid[0])-1
for i in range(0, len(grid[0])):
mx0, my0 =self._coord2pix(grid[0][i]._get_rel_point(-cell_size/2,-cell_size/2))
mx1, my1 =self._coord2pix(grid[nx][i]._get_rel_point(-cell_size/2,cell_size/2))
cv2.line(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), colour, thickness=thickness)
mx0, my0 =self._coord2pix(grid[0][ny]._get_rel_point(cell_size/2,-cell_size/2))
mx1, my1 =self._coord2pix(grid[nx][ny]._get_rel_point(cell_size/2,cell_size/2))
cv2.line(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), colour, thickness=thickness)
for i in range(0, len(grid)):
mx0, my0 =self._coord2pix(grid[i][0]._get_rel_point(-cell_size/2,-cell_size/2))
mx1, my1 =self._coord2pix(grid[i][ny]._get_rel_point(cell_size/2,-cell_size/2))
cv2.line(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), colour, thickness=thickness)
mx0, my0 =self._coord2pix(grid[nx][0]._get_rel_point(-cell_size/2,cell_size/2))
mx1, my1 =self._coord2pix(grid[nx][ny]._get_rel_point(cell_size/2,cell_size/2))
cv2.line(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), colour, thickness=thickness)
def draw_cell(self, cell, cell_size, colour, thickness=2):
mx0, my0 =self._coord2pix(cell._get_rel_point(-cell_size/2,-cell_size/2))
mx1, my1 =self._coord2pix(cell._get_rel_point(cell_size/2,cell_size/2))
cv2.rectangle(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), colour, thickness=thickness)
def draw_list_of_coords(self, list_of_coords, colour, size=6, thickness=2):
for i in list_of_coords:
mx, my = self._coord2pix(i)
cv2.circle(self.image, (int(mx), int(my)), size, colour, thickness)
def draw_polygon(self, list_of_coords, colour, thickness=2):
for i in range(1, len(list_of_coords)):
mx0, my0 =self._coord2pix(list_of_coords[i-1])
mx1, my1 =self._coord2pix(list_of_coords[i])
cv2.line(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), colour, thickness=thickness)
mx0, my0 =self._coord2pix(list_of_coords[0])
cv2.line(self.image, (int(mx0), int(my0)), (int(mx1), int(my1)), colour, thickness=thickness)
def draw_line(self, line, colour, size=3, thickness=2, alpha=128):
mx1, my1 =self._coord2pix(line[0])
mx2, my2 =self._coord2pix(line[1])
b = [int(255*x) for x in mcolors.hex2color(mcolors.cnames[colour])]
b = b[::-1]
b.append(alpha)
cv2.circle(self.image, (int(mx1), int(my1)), size, b, thickness)
cv2.circle(self.image, (int(mx2), int(my2)), size, b, thickness)
cv2.line(self.image, (int(mx1), int(my1)), (int(mx2), int(my2)), b, thickness=thickness)
def draw_legend(self, vmin, vmax, colmap, title="OUTPUTS"):
font = cv2.FONT_HERSHEY_SIMPLEX
if (vmax-vmin) > 1:
step = float(vmax - vmin)/float(600-40)
else:
vmax = vmax + 500
vmin = vmin - 500
step = float(vmax - vmin)/float(600-40)
if step>1.0:
ind = 0
while ind < 560:
# print int(vmin+(ind*step))
a= colmap.to_rgba(int(vmin+(ind*step)))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255),int(a[3]*255))
cv2.rectangle(self.image, (int(ind+40), int(510)), (int(ind+1+40), int(530)), b , thickness=-1)
ind+=1
else:
step=1/step
ind = 0
while ind < 560:
# print int(vmin+(ind*step))
a= colmap.to_rgba(int(vmin+(ind/step)))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255),int(a[3]*255))
cv2.rectangle(self.image, (int(ind+40), int(510)), (int(ind+1+40), int(530)), b , thickness=-1)
ind+=1
a= colmap.to_rgba(int(vmin))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*250))
        cv2.putText(self.image, str(np.floor(vmin)) + " kPa", (int(5), int(500)), font, 0.6, b, 2)
a= colmap.to_rgba(int(vmax))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*250))
        tsz=cv2.getTextSize(str(np.ceil(vmax)) + " kPa", font, 0.6, 2)
        cv2.putText(self.image, str(np.ceil(vmax)) + " kPa", (int(640)-tsz[0][0]-5, int(500)), font, 0.6, b, 2)
cv2.putText(self.image, title, (240,80), font, 0.8, (200, 200, 200,255), 2)
def put_text(self,text,colour=(200,200,200,200)):
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(self.image, text, (int(520), int(80)), font, 0.8, colour, 2)
|
mit
|
great-expectations/great_expectations
|
tests/data_context/conftest.py
|
1
|
6202
|
import os
import shutil
import pytest
import great_expectations as ge
from great_expectations.data_context.types.base import DataContextConfig
from great_expectations.data_context.util import file_relative_path
from tests.integration.usage_statistics.test_integration_usage_statistics import (
USAGE_STATISTICS_QA_URL,
)
@pytest.fixture()
def data_context_without_config_variables_filepath_configured(tmp_path_factory):
# This data_context is *manually* created to have the config we want, vs created with DataContext.create
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
create_data_context_files(
context_path,
asset_config_path,
ge_config_fixture_filename="great_expectations_basic_without_config_variables_filepath.yml",
config_variables_fixture_filename=None,
)
return ge.data_context.DataContext(context_path)
@pytest.fixture()
def data_context_with_variables_in_config(tmp_path_factory, monkeypatch):
monkeypatch.setenv("FOO", "BAR")
monkeypatch.setenv("REPLACE_ME_ESCAPED_ENV", "ive_been_$--replaced")
# This data_context is *manually* created to have the config we want, vs created with DataContext.create
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
create_data_context_files(
context_path,
asset_config_path,
ge_config_fixture_filename="great_expectations_basic_with_variables.yml",
config_variables_fixture_filename="config_variables.yml",
)
return ge.data_context.DataContext(context_path)
@pytest.fixture()
def data_context_with_variables_in_config_exhaustive(tmp_path_factory):
# This data_context is *manually* created to have the config we want, vs created with DataContext.create
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
create_data_context_files(
context_path,
asset_config_path,
ge_config_fixture_filename="great_expectations_basic_with_exhaustive_variables.yml",
config_variables_fixture_filename="config_variables_exhaustive.yml",
)
return ge.data_context.DataContext(context_path)
def create_data_context_files(
context_path,
asset_config_path,
ge_config_fixture_filename,
config_variables_fixture_filename=None,
):
if config_variables_fixture_filename:
os.makedirs(context_path, exist_ok=True)
os.makedirs(os.path.join(context_path, "uncommitted"), exist_ok=True)
copy_relative_path(
f"../test_fixtures/{config_variables_fixture_filename}",
str(os.path.join(context_path, "uncommitted/config_variables.yml")),
)
copy_relative_path(
f"../test_fixtures/{ge_config_fixture_filename}",
str(os.path.join(context_path, "great_expectations.yml")),
)
else:
os.makedirs(context_path, exist_ok=True)
copy_relative_path(
f"../test_fixtures/{ge_config_fixture_filename}",
str(os.path.join(context_path, "great_expectations.yml")),
)
create_common_data_context_files(context_path, asset_config_path)
def create_common_data_context_files(context_path, asset_config_path):
os.makedirs(
os.path.join(asset_config_path, "mydatasource/mygenerator/my_dag_node"),
exist_ok=True,
)
copy_relative_path(
"../test_fixtures/"
"expectation_suites/parameterized_expectation_suite_fixture.json",
os.path.join(
asset_config_path, "mydatasource/mygenerator/my_dag_node/default.json"
),
)
os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True)
copy_relative_path(
"../test_fixtures/custom_pandas_dataset.py",
str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")),
)
copy_relative_path(
"../test_fixtures/custom_sqlalchemy_dataset.py",
str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")),
)
copy_relative_path(
"../test_fixtures/custom_sparkdf_dataset.py",
str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")),
)
def copy_relative_path(relative_src, dest):
shutil.copy(file_relative_path(__file__, relative_src), dest)
@pytest.fixture
def basic_data_context_config():
return DataContextConfig(
**{
"commented_map": {},
"config_version": 2,
"plugins_directory": "plugins/",
"evaluation_parameter_store_name": "evaluation_parameter_store",
"validations_store_name": "does_not_have_to_be_real",
"expectations_store_name": "expectations_store",
"config_variables_file_path": "uncommitted/config_variables.yml",
"datasources": {},
"stores": {
"expectations_store": {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"base_directory": "expectations/",
},
},
"evaluation_parameter_store": {
"module_name": "great_expectations.data_context.store",
"class_name": "EvaluationParameterStore",
},
},
"data_docs_sites": {},
"validation_operators": {
"default": {
"class_name": "ActionListValidationOperator",
"action_list": [],
}
},
"anonymous_usage_statistics": {
"enabled": True,
"data_context_id": "6a52bdfa-e182-455b-a825-e69f076e67d6",
"usage_statistics_url": USAGE_STATISTICS_QA_URL,
},
}
)
|
apache-2.0
|
araichev/affordability_nz
|
notebooks/helpers.py
|
1
|
6985
|
"""
This module contains the functions that create most of data files for
a particular region of New Zealand.
It assumes that you have already created the following files for a region
<region>::
|- data/
|- processed/
|- rents.csv
|- rental_areas.geojson
|- rental_area_points.geojson
|- <region>/
|- walking_commutes.csv
|- bicycling_commutes.csv
|- driving_commutes.csv
|- transit_commutes.csv
TODO:
- Add automated tests
"""
from typing import Optional, List
import json
import os
from pathlib import Path
import datetime as dt
import numpy as np
import pandas as pd
import geopandas as gpd
ROOT = Path(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
DATA_DIR = ROOT/'docs'/'data'
CRS_NZGD49 = 'epsg:27200'
CRS_NZTM = 'epsg:2193'
CRS_WGS84 = 'epsg:4326'
REGIONS = [
'auckland',
'canterbury',
'wellington',
]
MODES = [
'walking',
'bicycling',
'driving',
'transit',
]
# Cost in NZD/km. Get transit costs from an origin-destination matrix.
COST_BY_MODE = {
'walking': 0,
'bicycling': 0,
'driving': 0.274,
'transit': 0,
}
def get_secret(key, secrets_path=ROOT/'secrets.json'):
"""
Open the JSON file at ``secrets_path``, and return the value
corresponding to the given key.
"""
secrets_path = Path(secrets_path)
with secrets_path.open() as src:
secrets = json.load(src)
return secrets[key]
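# Hedged usage sketch (the key name is hypothetical): with a secrets.json at the
# project root such as {"google_api_key": "abc123"}, the value is read with
# get_secret('google_api_key'). A missing key raises KeyError and a missing
# file raises FileNotFoundError.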
def get_path(key, region=None):
"""
Return the path (Path object) of the file corresponding to the given
key (string) and region (string).
"""
path = DATA_DIR/'processed'
error = ValueError('Invalid key-region pair ({!s}, {!s})'.format(
key, region))
if region is None:
if key == 'property_titles':
# Special case using collected data
path = DATA_DIR/'collected'/'nz-cadastral-titles-jan-10.gpkg'
elif key == 'au2001_csv':
path /= 'au2001.csv'
elif key == 'au2001':
path /= 'au2001.geojson'
elif key == 'rental_areas_csv':
path /= 'rental_areas.csv'
elif key == 'rental_areas':
path /= 'rental_areas.geojson'
elif key == 'rental_points':
path /= 'rental_points.geojson'
elif key == 'rents':
path /= 'rents.csv'
else:
raise error
else:
path /= region
if key == 'rental_areas':
path /= 'rental_areas.geojson'
elif key == 'rental_points':
path /= 'rental_points.geojson'
elif key == 'rents':
path /= 'rents.csv'
elif key == 'rents_json':
path /= 'rents.json'
elif key == 'commutes_walking':
path /= 'commutes_walking.csv'
elif key == 'commutes_bicycling':
path /= 'commutes_bicycling.csv'
elif key == 'commutes_driving':
            path /= 'commutes_driving.csv'
elif key == 'commutes_transit':
path /= 'commutes_transit.csv'
elif key == 'transit_costs':
path /= 'transit_costs.csv'
elif key == 'commute_costs':
path /= 'commute_costs.json'
else:
raise error
return path
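# Hedged examples of how key/region pairs resolve (paths shown relative to
# DATA_DIR/'processed'):
#
#     get_path('rents')                        # -> rents.csv
#     get_path('rents', region='auckland')     # -> auckland/rents.csv
#     get_path('commute_costs', 'wellington')  # -> wellington/commute_costs.json
#
# Unknown keys, or region-specific keys requested without a region, raise the
# ValueError defined above.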
def get_data(key, region=None):
"""
    Return the data corresponding to the given key (string) and
    region (string).
"""
path = get_path(key, region)
    if not path.exists():
raise ValueError('Data does not exist for key-region pair'
' ({!s}, {!s})'.format(key, region))
s = path.suffix
if s == '.csv':
result = pd.read_csv(path, dtype={'au2001': str})
elif s in ['.geojson', '.gpkg']:
result = gpd.read_file(str(path))
elif s == '.json':
with path.open() as src:
result = json.load(src)
return result
def get_latest_quarters(n: int, *, from_today=False) -> List[str]:
"""
Return a list of the latest ``n`` rental data quarters as
YYYY-MM-DD datestrings sorted chronologically.
Each quarter will be of the form YYYY-03-01, YYYY-06-01,
YYYY-09-01, or YYYY-12-01.
If ``from_today``, get the latest quarters theoretically possible from
today's date; otherwise, get them from the rental data.
"""
if from_today:
quarters = [q.strftime('%Y-%m') + '-01' for q in
pd.date_range(end=dt.datetime.now(), freq='Q', periods=n)]
else:
quarters = get_data('rents')['quarter'].unique()[-n:].tolist()
return quarters
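# Hedged example: get_latest_quarters(2, from_today=True) returns the two most
# recent quarter labels relative to today's date (e.g. something like
# ['2017-06-01', '2017-09-01']); without from_today they are taken from the
# last rows of the rents data instead.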
def aggregate_rents(rents, date=None, groupby_cols=('rental_area',
'num_bedrooms')):
"""
Given a DataFrame of rents, group the rents by the given groupby
columns, recomputing the counts and means.
    Return the resulting DataFrame, which has the following columns:
- the columns in ``groupby_cols``
- ``'territory'``
- ``'region'``
- ``'rent_count'``
- ``'rent_mean'``
- ``'rent_geo_mean'``
If a date (YYYY-MM-DD date string) is given, then first slice the
rents to calendar quarters equal to or later than the date.
"""
if date is not None:
f = rents.loc[lambda x: x.quarter >= date].copy()
else:
f = rents.copy()
def my_agg(group):
d = {}
if 'territory' not in groupby_cols:
d['territory'] = group['territory'].iat[0]
if 'region' not in groupby_cols:
d['region'] = group['region'].iat[0]
d['rent_count'] = group['rent_count'].sum()
d['rent_mean'] = (group['rent_mean']*group['rent_count']).sum()/\
d['rent_count']
if d['rent_count']:
d['rent_geo_mean'] = (group['rent_geo_mean']**(
group['rent_count']/d['rent_count'])).prod()
else:
d['rent_geo_mean'] = np.nan
return pd.Series(d)
return (
f
.groupby(list(groupby_cols))
.apply(my_agg)
.reset_index()
)
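# Hedged worked example: two input rows sharing (rental_area, num_bedrooms) with
# rent_count 10 and 30 and rent_mean 400 and 500 aggregate to rent_count 40 and
# rent_mean (10*400 + 30*500)/40 = 475; rent_geo_mean is the analogous
# count-weighted geometric mean (here 400**0.25 * 500**0.75).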
def nan_to_none(df):
"""
Replace the NaN values in the given DataFrame with None and return
the resulting DataFrame.
"""
return df.where((pd.notnull(df)), None)
def build_json_rents(rents):
"""
Given a DataFrame of rents of the form output by
    ``get_data('rents')``, aggregate the rents by rental area
and number of bedrooms ('1', '2', '3', or '4'), and return the
result as a dictionary of the form
rental area -> num_bedrooms -> rent geometric mean.
Some of the mean rents could be ``None``.
"""
f = aggregate_rents(rents)
# Drop 5+ bedrooms and round to nearest dollar
f = f[f['num_bedrooms'] != '5+'].copy().round()
# Replace NaN with None to make JSON-compatible
f = nan_to_none(f)
# Save to dictionary of form rental area -> num_bedrooms -> rent geo mean
d = {area: dict(g[['num_bedrooms', 'rent_geo_mean']].values)
for area, g in f.groupby('rental_area')}
return d
|
mit
|
hsiaoyi0504/scikit-learn
|
sklearn/svm/tests/test_sparse.py
|
95
|
12156
|
from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check: test that decision_function implemented in python
    # returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
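    # For binary problems libsvm orders the classes so that a positive decision
    # value maps to classes_[1]; the indexing above reproduces predict().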
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
|
bsd-3-clause
|
ElDeveloper/scikit-learn
|
sklearn/neighbors/unsupervised.py
|
117
|
4755
|
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
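# Hedged usage sketch complementing the docstring example above: the fitted
# estimator also exposes kneighbors_graph for building a sparse connectivity
# matrix, e.g.
#
#     from sklearn.neighbors import NearestNeighbors
#     nn = NearestNeighbors(n_neighbors=2).fit([[0], [1], [3]])
#     A = nn.kneighbors_graph([[0], [1], [3]])  # scipy.sparse CSR matrix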
|
bsd-3-clause
|
rohit21122012/DCASE2013
|
runs/2016/baseline128/src/dataset.py
|
37
|
78389
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import urllib2
import socket
import locale
import zipfile
import tarfile
import csv
import numpy
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from ui import *
from general import *
from files import *
class Dataset(object):
"""Dataset base class.
    Specific dataset classes inherit from this class and reimplement only the methods they need.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
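    # Hedged example rows for the tab-separated meta file parsed above:
    #   audio/a.wav<TAB>home                                   -> scene meta (2 cols)
    #   audio/a.wav<TAB>home<TAB>cm<TAB>child speech;adult male speech
    #                                                          -> audio tagging meta (4 cols)
    #   audio/a.wav<TAB>home<TAB>0.0<TAB>2.5<TAB>speech<TAB>m  -> event meta (6 cols)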
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield window while it's valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
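    # Hedged example: print_bytes(3 * 1024 ** 2) returns the locale-grouped byte
    # count followed by the IEC suffix, e.g. '3,145,728 bytes (3 MiB)'.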
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
                    content_length = handle.info().getheader("Content-Length")
                    if content_length is not None:
                        size = int(content_length)
                    else:
                        size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
            except (urllib2.URLError, socket.timeout), e:
                try:
                    fo.close()
                except (NameError, IOError):
                    # The download may have failed before the temporary file
                    # was opened; in that case there is nothing to clean up.
                    pass
                raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', note=tar_info.name)
tar.members = []
tar.close()
foot()
    def on_after_extract(self):
        """Dataset meta data preparation; this will be overridden in dataset-specific classes.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
    def check_filelist(self):
        """Generate a hash from the file list and check whether it matches the one saved in filelist.hash.
        If files have been deleted or added, the check returns False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
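    # Hedged usage sketch: a concrete dataset is typically prepared and iterated
    # with
    #
    #     dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
    #     for fold in dataset.folds(mode='folds'):
    #         train_items = dataset.train(fold)
    #         test_items = dataset.test(fold)
    #
    # fetch() only downloads and extracts when the stored file-list hash no
    # longer matches the files on disk.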
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
            Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
            for fold in xrange(1, self.evaluation_folds + 1):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                        if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path,'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
                    with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name = 'CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
    def audio_files(self):
        """Get all audio files in the dataset, using only files from the CHiME-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
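    # Hedged example: 'majorityvote' below is a string of single-character tag
    # codes, so a chunk annotated 'cmv' maps through tagcode_to_taglabel to
    # ['child speech', 'adult male speech', 'video game/tv'].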
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                        if tag == 'b':
                            print file
                        if tag != 'S' and tag != 'U':
tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(self.relative_to_absolute_path(os.path.join('chime_home','chunks',row[1]+'.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
fold+= 1
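# Hedged usage sketch (editor addition, not part of the original dataset module):
# how a CHiME-Home 'majorityvote' code string is expanded into tag labels. The
# mapping mirrors CHiMEHome_DomesticAudioTag_DevelopmentSet.tagcode_to_taglabel
# and the filtering of 'S'/'U' codes done in on_after_extract; the helper name
# _example_expand_majorityvote is an illustrative assumption, not library API.
def _example_expand_majorityvote(code_string):
    tag_map = {'c': 'child speech',
               'm': 'adult male speech',
               'f': 'adult female speech',
               'v': 'video game/tv',
               'p': 'percussive sound',
               'b': 'broadband noise',
               'o': 'other',
               'S': 'silence/background',
               'U': 'unidentifiable'}
    # Silence/background and unidentifiable codes are dropped, as in on_after_extract.
    labels = [tag_map[c] for c in code_string if c in tag_map and c not in ('S', 'U')]
    assert _expand_check(labels) is None or True  # no-op, keeps the sketch side-effect free
    return labels
def _expand_check(labels):
    # e.g. 'cmv' -> ['child speech', 'adult male speech', 'video game/tv']
    return None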
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
print self.evaluation_setup_path
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file) or 1:
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sound are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sound are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
|
mit
|
maestrotf/pymepps
|
pymepps/loader/filehandler/netcdfhandler.py
|
1
|
5732
|
#!/bin/env python
# -*- coding: utf-8 -*-
# """
# Created on 14.12.16
#
# Created for pymepps
#
# @author: Tobias Sebastian Finn, [email protected]
#
# Copyright (C) {2016} {Tobias Sebastian Finn}
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# """
# System modules
import logging
# External modules
import xarray as xr
import numpy as np
# Internal modules
from .filehandler import FileHandler
logger = logging.getLogger(__name__)
def cube_to_series(cube, var_name):
cleaned_dims = list(cube.dims)
if 'index' in cleaned_dims:
cleaned_dims.remove('index')
elif 'time' in cleaned_dims:
cleaned_dims.remove('time')
elif 'validtime' in cleaned_dims:
cleaned_dims.remove('validtime')
if cleaned_dims:
stacked = cube.stack(col=cleaned_dims)
data = stacked.to_pandas()
else:
data = cube.to_series()
data.name = var_name
return data
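# Hedged usage sketch (editor addition): cube_to_series keeps the time-like
# dimension as the pandas index and stacks any remaining dimensions into the
# columns. The variable name 'temp' and the dimension names below are
# illustrative assumptions, not part of pymepps.
def _example_cube_to_series():
    import numpy as np
    import pandas as pd
    import xarray as xr
    cube = xr.DataArray(
        np.arange(6.0).reshape(3, 2),
        coords={'time': pd.date_range('2016-12-14', periods=3),
                'height': [2.0, 10.0]},
        dims=('time', 'height'))
    data = cube_to_series(cube, 'temp')
    # 'height' is stacked into the columns, 'time' stays as the row index.
    assert data.shape[0] == cube.sizes['time']
    return data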
class NetCDFHandler(FileHandler):
def _get_varnames(self):
var_names = list(self.ds.data_vars)
return var_names
def is_type(self):
try:
self.open()
self.close()
return True
except OSError:
return False
def open(self):
if self.ds is None:
self.ds = xr.open_dataset(self.file, engine='netcdf4')
return self
def close(self):
if self.ds is not None:
self.ds.close()
self.ds = None
@property
def lon_lat(self):
attrs = {}
try:
attrs['latitude'] = float(self.ds.lat.values)
        except (TypeError, KeyError):
pass
try:
attrs['longitude'] = float(self.ds.lon.values)
        except (TypeError, KeyError):
pass
try:
attrs['altitude'] = float(self.ds.zsl.values)
        except (TypeError, KeyError):
pass
return attrs
def load_cube(self, var_name):
"""
Method to load a variable from the netcdf file and return it as
xr.DataArray.
Parameters
----------
var_name : str
The variable name, which should be extracted.
Returns
-------
variable : xr.DataArray
The DataArray of the variable.
"""
variable = self.ds[var_name]
if hasattr(variable, '_FillValue'):
variable.values[variable.values == variable._FillValue] = np.nan
elif hasattr(variable, 'missing_value'):
variable.values[variable.values == variable.missing_value] = np.nan
else:
variable.values[variable.values==9.96921e+36] = np.nan
return variable
def get_timeseries(self, var_name, **kwargs):
"""
Method to get the time series from a NetCDF file. This is designed for
measurement site data in netcdf format. At the moment this method is
only tested for Wettermast Hamburg data!
Parameters
----------
var_name : str
The variable name, which should be extracted.
Returns
-------
data : dict with pandas series
The selected variable is extracted as dict with pandas series as
values.
"""
cube = self.load_cube(var_name).load()
data = cube_to_series(cube, var_name)
return data
def get_messages(self, var_name, **kwargs):
"""
Method to imitate the message-like behaviour of grib files.
Parameters
----------
var_name : str
The variable name, which should be extracted.
        runtime : np.datetime64, optional
            If the dataset has no runtime, this runtime is used. If the
            runtime is not set, it will be inferred from the file name.
        ensemble : int or str, optional
            If the dataset has no ensemble information, this ensemble is used.
            If the ensemble is not set, it will be inferred from the file name.
        sliced_coords : tuple(slice), optional
            Slices applied to the cube before it is loaded. This is helpful
            for large opendap requests. The slices are applied to the trailing
            dimensions, so (slice(1,2,1), slice(3,5,1)) means [..., 1:2, 3:5].
            If it is not set, all data is used.
Returns
-------
data : list of xr.DataArray
The list with the message-wise data as DataArray. The DataArray
have six coordinates (analysis, ensemble, time, level, y, x).
The shape of DataArray are normally (1,1,1,1,y_size,x_size).
"""
cube = self.load_cube(var_name)
if 'sliced_coords' in kwargs:
cube = cube[(...,)+kwargs['sliced_coords']]
cube.attrs.update(self.ds.attrs)
cube = cube.load()
cube = cube.pp.normalize_coords(
runtime=self._get_runtime(**kwargs),
ensemble=self._get_ensemble(**kwargs),
validtime=self._get_validtime(**kwargs)
)
return cube
|
gpl-3.0
|
pv/scikit-learn
|
sklearn/utils/fixes.py
|
133
|
12882
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
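# Hedged usage sketch (editor addition): _parse_version turns a dotted version
# string into a tuple so that versions compare lexicographically, which is how
# np_version and sp_version are used throughout this module.
def _example_parse_version():
    assert _parse_version('1.8.1') == (1, 8, 1)
    # Development suffixes that are not integers are kept as strings.
    assert _parse_version('0.14.0.dev-e80a8f7')[:3] == (0, 14, 0)
    # Tuple comparison then gives the intended ordering checks.
    assert _parse_version('0.13.3') < (0, 14)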
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
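# Hedged numeric check (editor addition): whichever branch provided `expit`
# above, it should agree with the plain logistic 1 / (1 + exp(-x)) for moderate
# inputs; the tanh form used in the fallback is just a more stable way to
# evaluate the same function.
def _example_expit_identity():
    import numpy as np
    x = np.array([-5.0, -1.0, 0.0, 1.0, 5.0])
    reference = 1.0 / (1.0 + np.exp(-x))
    assert np.allclose(expit(x), reference)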
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # NumPy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
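# Hedged usage sketch (editor addition): the backport (or the stdlib import)
# should match the behaviour documented in the comment above, e.g. for
# 'ABC' choose 2.
def _example_combinations_with_replacement():
    result = list(combinations_with_replacement('ABC', 2))
    assert result == [('A', 'A'), ('A', 'B'), ('A', 'C'),
                      ('B', 'B'), ('B', 'C'),
                      ('C', 'C')]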
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
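# Hedged usage sketch (editor addition): whichever branch provided `isclose`,
# element-wise closeness is judged against the atol + rtol * |b| tolerance.
def _example_isclose():
    assert isclose(1.0, 1.0 + 1e-9)
    assert not isclose(1.0, 1.1)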
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
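# Hedged usage sketch (editor addition): with exist_ok=True, repeated calls on
# the same path must not raise, whichever branch above defined `makedirs`.
def _example_makedirs_exist_ok():
    import shutil
    import tempfile
    base = tempfile.mkdtemp()
    target = os.path.join(base, 'a', 'b')
    try:
        makedirs(target, exist_ok=True)
        makedirs(target, exist_ok=True)  # second call is a no-op, no OSError
        assert os.path.isdir(target)
    finally:
        shutil.rmtree(base)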
|
bsd-3-clause
|
winklerand/pandas
|
pandas/tests/indexes/timedeltas/test_timedelta.py
|
2
|
14189
|
import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas import (timedelta_range, date_range, Series, Timedelta,
TimedeltaIndex, Index, DataFrame,
Int64Index)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_index_equal)
from ..datetimelike import DatetimeLike
randn = np.random.randn
class TestTimedeltaIndex(DatetimeLike):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setup_method(self, method):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
# Dummy method to override super's version; this test is now done
# in test_arithmetic.py
pass
def test_shift(self):
# test shift for TimedeltaIndex
# err8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
tm.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
        tm.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
def test_difference_freq(self):
# GH14323: Difference of TimedeltaIndex should not preserve frequency
index = timedelta_range("0 days", "5 days", freq="D")
other = timedelta_range("1 days", "4 days", freq="D")
expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other)
expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',
'3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
tm.assert_index_equal(index, joined)
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_index_equal(cols, joined)
def test_sort_values(self):
idx = TimedeltaIndex(['4d', '1d', '2d'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]),
check_dtype=False)
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]),
check_dtype=False)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',
'4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day', '3day'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',
'1 day 00:00:02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d', '1d', '2d'])
assert not idx.equals(list(idx))
non_td = Index(list('abc'))
assert not idx.equals(list(non_td))
def test_map(self):
# test_map_dictlike generally tests
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = Int64Index([f(x) for x in rng])
tm.assert_index_equal(result, exp)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, exp)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_total_seconds(self):
# GH 10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
s = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with nat
s[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with both nat
s = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(s.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days', '10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = tm.round_trip_pickle(rng)
tm.assert_index_equal(rng, rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days', '10 days', freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
assert (result['B'] == td).all()
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
tm.assert_index_equal(rng.days, Index([1, 1], dtype='int64'))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
dtype='int64'))
tm.assert_index_equal(
rng.microseconds,
Index([100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
tm.assert_index_equal(rng.nanoseconds,
Index([456, 456], dtype='int64'))
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(s.dt.seconds, Series(
[10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))
# preserve name (GH15589)
rng.name = 'name'
assert rng.days.name == 'name'
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101', periods=4)) - \
Series(date_range('20121201', periods=4))
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, 'D')
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan
])
assert_series_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Series([31, 31, 31, np.nan])
assert_series_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_series_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, 'D')
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Index([31, 31, 31, np.nan])
assert_index_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result, expected)
class TestTimeSeries(object):
_multiprocess_can_split_ = True
def test_series_box_timedelta(self):
rng = timedelta_range('1 day 1 s', periods=5, freq='h')
s = Series(rng)
assert isinstance(s[1], Timedelta)
assert isinstance(s.iat[2], Timedelta)
|
bsd-3-clause
|
matthewpklein/battsimpy
|
tests/dae_genPart.py
|
1
|
59229
|
import numpy
import numpy.linalg
import scipy.linalg
import scipy.interpolate
from scipy.signal import wiener, filtfilt, butter, gaussian
from scipy.ndimage import filters
from matplotlib import pyplot as plt
plt.style.use('classic')
from assimulo.solvers import IDA
from assimulo.problem import Implicit_Problem
from scipy.sparse.linalg import spsolve as sparseSolve
from scipy.sparse import csr_matrix as sparseMat
import scipy.sparse as sps
import scipy.sparse as sparse
import math
from copy import deepcopy
def compute_deriv( func, x0 ) :
y0 = func(x0)
J = numpy.zeros( (len(x0),len(x0)), dtype='d' )
x_higher = deepcopy(x0)
eps = 1e-8
for ivar in range(len(x0)) :
x_higher[ivar] = x_higher[ivar] + eps
# evaluate the function
y_higher = func(x_higher)
dy_dx = (y_higher-y0) / eps
J[:,ivar] = dy_dx
x_higher[ivar] = x0[ivar]
return J
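# Hedged usage sketch (editor addition): compute_deriv builds a dense forward
# difference Jacobian, so for a linear map f(x) = A.dot(x) it should recover A
# up to finite-difference rounding error.
def _example_compute_deriv():
    import numpy
    A = numpy.array([[2.0, 0.5],
                     [1.0, 3.0]])
    func = lambda x: A.dot(x)
    J = compute_deriv(func, numpy.array([1.0, 2.0]))
    assert numpy.allclose(J, A, atol=1e-5)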
def right_side_coeffs( h_n, h_n1 ) :
a_n = h_n / ( h_n1 * (h_n1+h_n) )
b_n = -( h_n1 + h_n) / ( h_n1 * h_n )
c_n = ( 2*h_n + h_n1 ) / ( h_n * (h_n1+h_n) )
return a_n, b_n, c_n
def left_side_coeffs( h_n, h_n1 ) :
a_n = -( 2*h_n + h_n1 ) / ( h_n * (h_n1+h_n) )
b_n = ( h_n1 + h_n) / ( h_n1 * h_n )
c_n = - h_n / ( h_n1 * (h_n1+h_n) )
return a_n, b_n, c_n
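# Hedged numeric check (editor addition): right_side_coeffs returns the
# one-sided finite-difference weights (a_n, b_n, c_n) for f'(x_n) on a
# non-uniform stencil x_{n-2}, x_{n-1}, x_n with h_n1 = x_{n-1} - x_{n-2} and
# h_n = x_n - x_{n-1}; the stencil is exact for quadratics.
def _example_right_side_coeffs():
    import numpy
    x = numpy.array([0.0, 0.2, 0.3])       # non-uniform spacing
    h_n1, h_n = x[1] - x[0], x[2] - x[1]
    a_n, b_n, c_n = right_side_coeffs(h_n, h_n1)
    f = x ** 2                              # f'(x) = 2 x
    deriv = a_n * f[0] + b_n * f[1] + c_n * f[2]
    assert abs(deriv - 2.0 * x[2]) < 1e-12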
def build_interp_2d( path ) :
raw_map = numpy.loadtxt( path, delimiter="," )
v1 = raw_map[1:,0]
v2 = raw_map[0,1:]
dat_map = raw_map[1:,1:]
if v1[1] < v1[0] :
v1 = numpy.flipud( v1 )
dat_map = numpy.flipud(dat_map)
if v2[1] < v2[0] :
v2 = numpy.flipud( v2 )
dat_map = numpy.fliplr(dat_map)
return scipy.interpolate.RectBivariateSpline( v1, v2, dat_map )
def ButterworthFilter( x, y, ff=0.2 ) :
b, a = butter(1, ff)
fl = filtfilt( b, a, y )
return fl
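# Hedged usage sketch (editor addition): ButterworthFilter applies a first-order
# zero-phase (forward-backward) low-pass filter; the x argument is kept for
# interface symmetry but is not used by the implementation. The signal below is
# an illustrative assumption.
def _example_butterworth_filter():
    import numpy
    x = numpy.linspace(0.0, 1.0, 200)
    clean = numpy.sin(2.0 * numpy.pi * x)
    noisy = clean + 0.1 * numpy.sin(80.0 * numpy.pi * x)
    smooth = ButterworthFilter(x, noisy, ff=0.2)
    assert smooth.shape == noisy.shape
    # The high-frequency component is attenuated while the slow sine survives.
    assert numpy.std(smooth - clean) < numpy.std(noisy - clean)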
def get_smooth_Uref_data( Ua_path, Uc_path, ffa=0.4, ffc=0.2 ) :
"""
    Smooth the Uref data to improve numerical stability.
    The user should verify that the smoothing does not change the original
    Uref data beyond a tolerable amount (defined by the user).
    Linear interpolators are returned for Uref and dUref/dx for both the
    anode and the cathode.
"""
## Load the data files
uref_a_map = numpy.loadtxt( Ua_path, delimiter=',' )
uref_c_map = numpy.loadtxt( Uc_path, delimiter=',' )
if uref_a_map[1,0] < uref_a_map[0,0] :
uref_a_map = numpy.flipud( uref_a_map )
if uref_c_map[1,0] < uref_c_map[0,0] :
uref_c_map = numpy.flipud( uref_c_map )
xa = uref_a_map[:,0]
xc = uref_c_map[:,0]
# big_xa = numpy.linspace( xa[0], xa[-1], 300 )
# big_xc = numpy.linspace( xc[0], xc[-1], 300 )
# big_Ua = numpy.interp( big_xa, xa, uref_a_map[:,1] )
# big_Uc = numpy.interp( big_xc, xc, uref_c_map[:,1] )
# numpy.savetxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_bigx.csv', numpy.array([big_xa, big_Ua]).T, delimiter=',' )
# numpy.savetxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_bigx.csv', numpy.array([big_xc, big_Uc]).T, delimiter=',' )
## Smooth the signals
Ua_butter = ButterworthFilter( xa, uref_a_map[:,1], ff=ffa )
Uc_butter = ButterworthFilter( xc, uref_c_map[:,1], ff=ffc )
## Create the interpolators
Ua_intp = scipy.interpolate.interp1d( xa, Ua_butter, kind='linear' )
Uc_intp = scipy.interpolate.interp1d( xc, Uc_butter, kind='linear' )
# duref_a_map = numpy.gradient( uref_a_map[:,1] ) / numpy.gradient( xa )
# duref_c_map = numpy.gradient( uref_c_map[:,1] ) / numpy.gradient( xc )
duref_a = numpy.gradient( Ua_butter ) / numpy.gradient( xa )
duref_c = numpy.gradient( Uc_butter ) / numpy.gradient( xc )
dUa_intp = scipy.interpolate.interp1d( xa, duref_a, kind='linear' )
dUc_intp = scipy.interpolate.interp1d( xc, duref_c, kind='linear' )
# # Plot the Uref data for verification
# plt.figure()
# plt.plot( xa, uref_a_map[:,1], label='Ua map' )
# plt.plot( xc, uref_c_map[:,1], label='Uc map' )
## plt.plot( xa, Ua_butter, label='Ua butter' )
## plt.plot( xc, Uc_butter, label='Uc butter' )
# plt.plot( xa, self.uref_a(xa), label='Ua interp lin' )
# plt.plot( xc, self.uref_c(xc), label='Uc interp lin' )
# plt.legend()
# plt.figure()
# plt.plot( xa, duref_a_map, label='dUa map' )
# plt.plot( xc, duref_c_map, label='dUc map' )
## plt.plot( xa, duref_a_b, label='dUa B' )
## plt.plot( xc, duref_c_b, label='dUc B' )
# plt.plot( xa, self.duref_a_interp(xa), label='dUa interp butter' )
# plt.plot( xc, self.duref_c_interp(xc), label='dUc interp butter' )
# plt.legend()
# plt.show()
return Ua_intp, Uc_intp, dUa_intp, dUc_intp
def nonlinspace( Rf,k,N ) :
r = numpy.zeros(N)
for i in range(N) :
r[i] = (1./k)**(-i)
if k!=1 :
r=max(r)-r
r=r/max(r)*Rf
else :
r=r*Rf
return r
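# Hedged usage sketch (editor addition): for 0 < k < 1, nonlinspace returns N
# radial nodes running from 0 to Rf with spacing that shrinks toward Rf, i.e.
# the mesh is refined near the particle surface.
def _example_nonlinspace():
    import numpy
    r = nonlinspace(1.0, 0.85, 10)
    assert abs(r[0]) < 1e-14 and abs(r[-1] - 1.0) < 1e-14
    dr = numpy.diff(r)
    assert numpy.all(dr > 0.0)              # strictly increasing nodes
    assert numpy.all(numpy.diff(dr) < 0.0)  # spacing shrinks toward Rf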
def mid_to_edge( var_mid, x_e ) :
var_edge = numpy.array( [var_mid[0]] + [ var_mid[i]*var_mid[i+1]/( ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i])))*var_mid[i+1] + (1- ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i]))))*var_mid[i] ) for i in range(len(var_mid)-1) ] + [var_mid[-1]] )
return var_edge
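# Hedged numeric check (editor addition): mid_to_edge maps N cell-centre values
# onto N+1 edge values with a distance-weighted harmonic mean, copying the two
# boundary values; a uniform field must therefore stay uniform.
def _example_mid_to_edge():
    import numpy
    x_e = numpy.linspace(0.0, 1.0, 5)       # 4 cells -> 5 edges
    var_mid = 3.0 * numpy.ones(4)
    var_edge = mid_to_edge(var_mid, x_e)
    assert len(var_edge) == len(var_mid) + 1
    assert numpy.allclose(var_edge, 3.0)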
def flux_mat_builder( N, x_m, vols, P ) :
A = numpy.zeros([N,N], dtype='d')
for i in range(1,N-1) :
A[i,i-1] = (1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1]) - (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[i,i+1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i ])
i=0
A[0,0] = -(1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[0,1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
i=N-1
A[i,i-1] = (1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
return A
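# Hedged numeric check (editor addition): flux_mat_builder assembles the finite
# volume divergence-of-flux operator. Boundary fluxes are added separately (via
# the B matrices in MyProblem), so every row of A must sum to zero and a
# constant field must produce no fluxes, i.e. the scheme is conservative.
def _example_flux_mat_builder():
    import numpy
    N = 5
    x_e = numpy.linspace(0.0, 1.0, N + 1)   # cell edges
    x_m = 0.5 * (x_e[1:] + x_e[:-1])        # cell centres
    vols = numpy.diff(x_e)                  # cell volumes (1D widths)
    P = numpy.ones(N + 1)                   # edge diffusivities
    A = flux_mat_builder(N, x_m, vols, P)
    assert numpy.allclose(A.sum(axis=1), 0.0)
    assert numpy.allclose(A.dot(numpy.ones(N)), 0.0)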
class MyProblem( Implicit_Problem ) :
def __init__(self, Na, Ns, Nc, Nra, Nrc, X, Ra, Rc, Ac, bsp_dir, y0, yd0, name ) :
Implicit_Problem.__init__(self,y0=y0,yd0=yd0,name=name)
self.T = 298.15 # Cell temperature, [K]
self.Ac = Ac # Cell coated area, [m^2]
### Control volumes and node points (mid node points and edge node points)
self.Ns = Ns
self.Na = Na
self.Nc = Nc
self.N = Na + Ns + Nc
self.X = X
self.x_e = numpy.linspace( 0.0, X, N+1 )
self.x_m = numpy.array( [ 0.5*(self.x_e[i+1]+self.x_e[i]) for i in range(N) ], dtype='d' )
self.vols = numpy.array( [ (self.x_e[i+1] - self.x_e[i]) for i in range(N)], dtype='d' )
# Radial mesh
self.Nra = Nra
self.Nrc = Nrc
k=0.85
self.r_e_a = nonlinspace( Ra, k, Nra+1 )
self.r_m_a = numpy.array( [ 0.5*(self.r_e_a[i+1]+self.r_e_a[i]) for i in range(Nra) ], dtype='d' )
self.r_e_c = nonlinspace( Rc, k, Nrc+1 )
self.r_m_c = numpy.array( [ 0.5*(self.r_e_c[i+1]+self.r_e_c[i]) for i in range(Nrc) ], dtype='d' )
self.vols_ra_m = numpy.array( [ 1/3.*(self.r_e_a[i+1]**3 - self.r_e_a[i]**3) for i in range(Nra)], dtype='d' )
self.vols_rc_m = numpy.array( [ 1/3.*(self.r_e_c[i+1]**3 - self.r_e_c[i]**3) for i in range(Nrc)], dtype='d' )
# Useful sub-meshes for the phi_s functions
self.x_m_a = self.x_m[:Na]
self.x_m_c = self.x_m[-Nc:]
self.x_e_a = self.x_e[:Na+1]
self.x_e_c = self.x_e[-Nc-1:]
self.vols_a = self.vols[:Na]
self.vols_c = self.vols[-Nc:]
self.num_diff_vars = self.N + self.Nra*self.Na + self.Nrc*self.Nc
self.num_algr_vars = self.Na+self.Nc + self.N + self.Na+self.Nc
### Volume fraction vectors and matrices for effective parameters
self.La, self.Ls, self.Lc = self.Na*X/self.N, self.Ns*X/self.N, self.Nc*X/self.N
self.Na, self.Ns, self.Nc = Na, Ns, Nc
eps_a = 0.25
eps_s = 0.45
eps_c = 0.2
ba, bs, bc = 1.2, 0.5, 0.5
eps_a_vec = [ eps_a for i in range(Na) ] # list( eps_a + eps_a/2.*numpy.sin(numpy.linspace(0.,Na/4,Na)) ) # list(eps_a + eps_a*numpy.random.randn(Na)/5.) #
eps_s_vec = [ eps_s for i in range(Ns) ]
eps_c_vec = [ eps_c for i in range(Nc) ] # list( eps_c + eps_c/2.*numpy.sin(numpy.linspace(0.,Nc/4,Nc)) ) # list(eps_c + eps_c*numpy.random.randn(Nc)/5.) #
self.eps_m = numpy.array( eps_a_vec + eps_s_vec + eps_c_vec, dtype='d' )
self.k_m = 1./self.eps_m
self.eps_mb = numpy.array( [ ea**ba for ea in eps_a_vec ] + [ es**bs for es in eps_s_vec ] + [ ec**bc for ec in eps_c_vec ], dtype='d' )
self.eps_eff = numpy.array( [ ea**(1.+ba) for ea in eps_a_vec ] + [ es**(1.+bs) for es in eps_s_vec ] + [ ec**(1.+bc) for ec in eps_c_vec ], dtype='d' )
self.eps_a_eff = self.eps_eff[:Na]
self.eps_c_eff = self.eps_eff[-Nc:]
self.K_m = numpy.diag( self.k_m )
t_plus = 0.36
F = 96485.0
self.t_plus = t_plus
self.F = F
self.R_gas = 8.314
self.Rp_a = Ra
self.Rp_c = Rc
as_a = 3.*(1.0-numpy.array(eps_a_vec, dtype='d'))/self.Rp_a
as_c = 3.*(1.0-numpy.array(eps_c_vec, dtype='d'))/self.Rp_c
self.as_a = as_a
self.as_c = as_c
self.as_a_mean = 1./self.La*sum( [ asa*v for asa,v in zip(as_a, self.vols[:Na]) ] )
self.as_c_mean = 1./self.Lc*sum( [ asc*v for asc,v in zip(as_c, self.vols[-Nc:]) ] )
print 'asa diff', self.as_a_mean - as_a[0]
print 'asc diff', self.as_c_mean - as_c[0]
### Electrolyte constant B_ce matrix
Ba = [ (1.-t_plus)*asa/ea for ea, asa in zip(eps_a_vec,as_a) ]
Bs = [ 0.0 for i in range(Ns) ]
Bc = [ (1.-t_plus)*asc/ec for ec, asc in zip(eps_c_vec,as_c) ]
self.B_ce = numpy.diag( numpy.array(Ba+Bs+Bc, dtype='d') )
Bap = [ asa*F for asa in as_a ]
Bsp = [ 0.0 for i in range(Ns) ]
Bcp = [ asc*F for asc in as_c ]
self.B2_pe = numpy.diag( numpy.array(Bap+Bsp+Bcp, dtype='d') )
# Interpolators for De, ke
self.De_intp = build_interp_2d( bsp_dir+'data/Model_v1/Model_Pars/electrolyte/De.csv' )
self.ke_intp = build_interp_2d( bsp_dir+'data/Model_v1/Model_Pars/electrolyte/kappa.csv' )
self.fca_intp = build_interp_2d( bsp_dir+'data/Model_v1/Model_Pars/electrolyte/fca.csv' )
### Solid phase parameters and j vector matrices
self.sig_a = 100. # [S/m]
self.sig_c = 40. # [S/m]
self.sig_a_eff = self.sig_a * self.eps_a_eff
self.sig_c_eff = self.sig_c * self.eps_c_eff
self.A_ps_a = flux_mat_builder( self.Na, self.x_m_a, numpy.ones_like(self.vols_a), self.sig_a_eff )
self.A_ps_c = flux_mat_builder( self.Nc, self.x_m_c, numpy.ones_like(self.vols_c), self.sig_c_eff )
        # Grounding form for BCs (was only needed during testing, before BVK was incorporated for coupling)
# self.A_ps_a[-1,-1] = 2*self.A_ps_a[-1,-1]
# self.A_ps_c[ 0, 0] = 2*self.A_ps_c[ 0, 0]
Baps = numpy.array( [ asa*F*dxa for asa,dxa in zip(as_a, self.vols_a) ], dtype='d' )
Bcps = numpy.array( [ asc*F*dxc for asc,dxc in zip(as_c, self.vols_c) ], dtype='d' )
self.B_ps_a = numpy.diag( Baps )
self.B_ps_c = numpy.diag( Bcps )
self.B2_ps_a = numpy.zeros( self.Na, dtype='d' )
self.B2_ps_a[ 0] = -1.
self.B2_ps_c = numpy.zeros( self.Nc, dtype='d' )
self.B2_ps_c[-1] = -1.
### Solid phase diffusion model
# Load the Ds data files
Dsa_map = numpy.loadtxt( bsp_dir+'data/Model_v1/Model_Pars/solid/diffusion/Ds_anode.csv', delimiter="," )
Dsc_map = numpy.loadtxt( bsp_dir+'data/Model_v1/Model_Pars/solid/diffusion/Ds_cathode.csv', delimiter="," )
if Dsa_map[1,0] < Dsa_map[0,0] :
Dsa_map = numpy.flipud( Dsa_map )
if Dsc_map[1,0] < Dsc_map[0,0] :
Dsc_map = numpy.flipud( Dsc_map )
## Create the interpolators
self.Dsa_intp = scipy.interpolate.interp1d( Dsa_map[:,0], Dsa_map[:,1], kind='linear' )
self.Dsc_intp = scipy.interpolate.interp1d( Dsc_map[:,0], Dsc_map[:,1], kind='linear' )
Dsa = numpy.mean(Dsa_map[:,1])
Dsc = numpy.mean(Dsc_map[:,1])
self.Dsa = Dsa
self.Dsc = Dsc
self.csa_max = 30555.0 # [mol/m^3]
self.csc_max = 51554.0 # [mol/m^3]
## Two parameter Solid phase diffusion model
# self.B_cs_a = numpy.diag( numpy.array( [-3.0/self.Rp_a for i in range(Na)], dtype='d' ) )
# self.B_cs_c = numpy.diag( numpy.array( [-3.0/self.Rp_c for i in range(Nc)], dtype='d' ) )
# self.C_cs_a = numpy.eye(Na)
# self.C_cs_c = numpy.eye(Nc)
# self.D_cs_a = numpy.diag( numpy.array( [-self.Rp_a/Dsa/5.0 for i in range(Na)], dtype='d' ) )
# self.D_cs_c = numpy.diag( numpy.array( [-self.Rp_c/Dsc/5.0 for i in range(Nc)], dtype='d' ) )
## 1D spherical diffusion model
# A_cs pre build
self.A_csa_single = flux_mat_builder( Nra, self.r_m_a, self.vols_ra_m, Dsa*(self.r_e_a**2) )
self.A_csc_single = flux_mat_builder( Nrc, self.r_m_c, self.vols_rc_m, Dsc*(self.r_e_c**2) )
# A_cs build up to the stacked full cs size (Nr and Nx)
b = [self.A_csa_single]*Na
self.A_cs_a = scipy.linalg.block_diag( *b )
b = [self.A_csc_single]*Nc
self.A_cs_c = scipy.linalg.block_diag( *b )
# B_cs and C_cs are constant (i.e., are not state-dependent)
self.B_csa_single = numpy.array( [ 0. for i in range(Nra-1) ]+[-1.*self.r_e_a[-1]**2/self.vols_ra_m[-1]], dtype='d' )
self.B_csc_single = numpy.array( [ 0. for i in range(Nrc-1) ]+[-1.*self.r_e_c[-1]**2/self.vols_rc_m[-1]], dtype='d' )
b = [self.B_csa_single]*Na
self.B_cs_a = scipy.linalg.block_diag( *b ).T
b = [self.B_csc_single]*Nc
self.B_cs_c = scipy.linalg.block_diag( *b ).T
# Particle surface concentration
h_na = self.r_e_a[-1] - self.r_m_a[-1]
h_n1a = self.r_m_a[-1] - self.r_m_a[-2]
h_nc = self.r_e_c[-1] - self.r_m_c[-1]
h_n1c = self.r_m_c[-1] - self.r_m_c[-2]
self.a_n_a, self.b_n_a, self.c_n_a = right_side_coeffs( h_na, h_n1a )
self.a_n_c, self.b_n_c, self.c_n_c = right_side_coeffs( h_nc, h_n1c )
self.C_cs_a_single = numpy.array( [0. for i in range(Nra-2)]+[-self.a_n_a/self.c_n_a, -self.b_n_a/self.c_n_a], dtype='d' )
self.C_cs_c_single = numpy.array( [0. for i in range(Nrc-2)]+[-self.a_n_c/self.c_n_c, -self.b_n_c/self.c_n_c], dtype='d' )
self.C_cs_a = scipy.linalg.block_diag( *[self.C_cs_a_single]*Na )
self.C_cs_c = scipy.linalg.block_diag( *[self.C_cs_c_single]*Nc )
# Particle core concentration
h_na = self.r_e_a[0] - self.r_m_a[0]
h_n1a = self.r_m_a[1] - self.r_m_a[0]
h_nc = self.r_e_c[0] - self.r_m_c[0]
h_n1c = self.r_m_c[1] - self.r_m_c[0]
a_n_a, b_n_a, c_n_a = left_side_coeffs( h_na, h_n1a )
a_n_c, b_n_c, c_n_c = left_side_coeffs( h_nc, h_n1c )
C_cso_a_single = numpy.array( [-b_n_a/a_n_a, -c_n_a/a_n_a] + [0. for i in range(Nra-2)], dtype='d' )
C_cso_c_single = numpy.array( [-b_n_c/a_n_c, -c_n_c/a_n_c] + [0. for i in range(Nrc-2)], dtype='d' )
self.C_cso_a = scipy.linalg.block_diag( *[C_cso_a_single]*Na )
self.C_cso_c = scipy.linalg.block_diag( *[C_cso_c_single]*Nc )
        # D_cs preliminary values; note these are Ds(cs) dependent and therefore require updating for state-dependent Ds
self.D_cs_a = -1.0/(Dsa*self.c_n_a)*numpy.eye( Na )
self.D_cs_c = -1.0/(Dsc*self.c_n_c)*numpy.eye( Nc )
### OCV
Ua_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_bigx.csv'
Uc_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_bigx.csv'
self.uref_a, self.uref_c, self.duref_a, self.duref_c = get_smooth_Uref_data( Ua_path, Uc_path, ffa=0.4, ffc=0.2 )
### Reaction kinetics parameters
self.io_a = 5.0 # [A/m^2]
self.io_c = 5.0 # [A/m^2]
### System indices
self.ce_inds = range( self.N )
self.ce_inds_r = numpy.reshape( self.ce_inds, [len(self.ce_inds),1] )
self.ce_inds_c = numpy.reshape( self.ce_inds, [1,len(self.ce_inds)] )
self.csa_inds = range( self.N, self.N + (self.Na*self.Nra) )
self.csa_inds_r = numpy.reshape( self.csa_inds, [len(self.csa_inds),1] )
self.csa_inds_c = numpy.reshape( self.csa_inds, [1,len(self.csa_inds)] )
self.csc_inds = range( self.N + (self.Na*self.Nra), self.N + (self.Na*self.Nra) + (self.Nc*self.Nrc) )
self.csc_inds_r = numpy.reshape( self.csc_inds, [len(self.csc_inds),1] )
self.csc_inds_c = numpy.reshape( self.csc_inds, [1,len(self.csc_inds)] )
self.T_ind = self.N + (self.Na*self.Nra) + (self.Nc*self.Nrc)
c_end = self.N + (self.Na*self.Nra) + (self.Nc*self.Nrc) + 1
self.ja_inds = range(c_end, c_end+self.Na)
self.ja_inds_r = numpy.reshape( self.ja_inds, [len(self.ja_inds),1] )
self.ja_inds_c = numpy.reshape( self.ja_inds, [1,len(self.ja_inds)] )
self.jc_inds = range(c_end+self.Na, c_end+self.Na +self.Nc)
self.jc_inds_r = numpy.reshape( self.jc_inds, [len(self.jc_inds),1] )
self.jc_inds_c = numpy.reshape( self.jc_inds, [1,len(self.jc_inds)] )
self.pe_inds = range( c_end+self.Na+self.Nc, c_end+self.Na+self.Nc +self.N )
self.pe_inds_r = numpy.reshape( self.pe_inds, [len(self.pe_inds),1] )
self.pe_inds_c = numpy.reshape( self.pe_inds, [1,len(self.pe_inds)] )
self.pe_a_inds = range( c_end+self.Na+self.Nc, c_end+self.Na+self.Nc +self.Na )
self.pe_a_inds_r = numpy.reshape( self.pe_a_inds, [len(self.pe_a_inds),1] )
self.pe_a_inds_c = numpy.reshape( self.pe_a_inds, [1,len(self.pe_a_inds)] )
self.pe_c_inds = range( c_end+self.Na+self.Nc +self.Na+self.Ns, c_end+self.Na+self.Nc +self.N )
self.pe_c_inds_r = numpy.reshape( self.pe_c_inds, [len(self.pe_c_inds),1] )
self.pe_c_inds_c = numpy.reshape( self.pe_c_inds, [1,len(self.pe_c_inds)] )
self.pa_inds = range( c_end+self.Na+self.Nc+self.N, c_end+self.Na+self.Nc+self.N +self.Na )
self.pa_inds_r = numpy.reshape( self.pa_inds, [len(self.pa_inds),1] )
self.pa_inds_c = numpy.reshape( self.pa_inds, [1,len(self.pa_inds)] )
self.pc_inds = range( c_end+self.Na+self.Nc+self.N+self.Na, c_end+self.Na+self.Nc+self.N+self.Na +self.Nc )
self.pc_inds_r = numpy.reshape( self.pc_inds, [len(self.pc_inds),1] )
self.pc_inds_c = numpy.reshape( self.pc_inds, [1,len(self.pc_inds)] )
# second set for manual jac version
c_end = 0
self.ja_inds2 = range(c_end, c_end+self.Na)
self.ja_inds_r2 = numpy.reshape( self.ja_inds2, [len(self.ja_inds2),1] )
self.ja_inds_c2 = numpy.reshape( self.ja_inds2, [1,len(self.ja_inds2)] )
self.jc_inds2 = range(c_end+self.Na, c_end+self.Na +self.Nc)
self.jc_inds_r2 = numpy.reshape( self.jc_inds2, [len(self.jc_inds2),1] )
self.jc_inds_c2 = numpy.reshape( self.jc_inds2, [1,len(self.jc_inds2)] )
self.pe_inds2 = range( c_end+self.Na+self.Nc, c_end+self.Na+self.Nc +self.N )
self.pe_inds_r2 = numpy.reshape( self.pe_inds2, [len(self.pe_inds2),1] )
self.pe_inds_c2 = numpy.reshape( self.pe_inds2, [1,len(self.pe_inds2)] )
self.pe_a_inds2 = range( c_end+self.Na+self.Nc, c_end+self.Na+self.Nc +self.Na )
self.pe_a_inds_r2 = numpy.reshape( self.pe_a_inds2, [len(self.pe_a_inds2),1] )
self.pe_a_inds_c2 = numpy.reshape( self.pe_a_inds2, [1,len(self.pe_a_inds2)] )
self.pe_c_inds2 = range( c_end+self.Na+self.Nc +self.Na+self.Ns, c_end+self.Na+self.Nc +self.N )
self.pe_c_inds_r2 = numpy.reshape( self.pe_c_inds2, [len(self.pe_c_inds2),1] )
self.pe_c_inds_c2 = numpy.reshape( self.pe_c_inds2, [1,len(self.pe_c_inds2)] )
self.pa_inds2 = range( c_end+self.Na+self.Nc+self.N, c_end+self.Na+self.Nc+self.N +self.Na )
self.pa_inds_r2 = numpy.reshape( self.pa_inds2, [len(self.pa_inds2),1] )
self.pa_inds_c2 = numpy.reshape( self.pa_inds2, [1,len(self.pa_inds2)] )
self.pc_inds2 = range( c_end+self.Na+self.Nc+self.N+self.Na, c_end+self.Na+self.Nc+self.N+self.Na +self.Nc )
self.pc_inds_r2 = numpy.reshape( self.pc_inds2, [len(self.pc_inds2),1] )
self.pc_inds_c2 = numpy.reshape( self.pc_inds2, [1,len(self.pc_inds2)] )
def set_iapp( self, I_app ) :
self.i_app = I_app / self.Ac
# cs mats
def update_cs_mats( self, csa, csc, csa_ss, csc_ss, csa_o, csc_o ) :
Acsa_list = [ [] for i in range(self.Na) ]
Acsc_list = [ [] for i in range(self.Nc) ]
Dsa_ss = [ 0. for i in range(self.Na) ]
Dsc_ss = [ 0. for i in range(self.Nc) ]
for ia in range(self.Na) :
csa_m = csa[ia*self.Nra:(ia+1)*self.Nra]
csa_e = numpy.array( [csa_o[ia]] + [ 0.5*(csa_m[i+1]+csa_m[i]) for i in range(self.Nra-1) ] + [csa_ss[ia]] )
Ua_e = self.uref_a( csa_e/self.csa_max )
Dsa_e = self.Dsa_intp( Ua_e )
Acsa_list[ia] = flux_mat_builder( self.Nra, self.r_m_a, self.vols_ra_m, Dsa_e*(self.r_e_a**2) )
Dsa_ss[ia] = Dsa_e[-1]
for ic in range(self.Nc) :
csc_m = csc[ic*self.Nrc:(ic+1)*self.Nrc]
csc_e = numpy.array( [csc_o[ic]] + [ 0.5*(csc_m[i+1]+csc_m[i]) for i in range(self.Nrc-1) ] + [csc_ss[ic]] )
Uc_e = self.uref_c( csc_e/self.csc_max )
Dsc_e = self.Dsc_intp( Uc_e )
Acsc_list[ic] = flux_mat_builder( self.Nrc, self.r_m_c, self.vols_rc_m, Dsc_e*(self.r_e_c**2) )
Dsc_ss[ic] = Dsc_e[-1]
# b = self.A_csa_single.reshape(1,Nra,Nra).repeat(Na,axis=0)
self.A_cs_a = scipy.linalg.block_diag( *Acsa_list )
self.A_cs_c = scipy.linalg.block_diag( *Acsc_list )
self.D_cs_a = numpy.diag( -1.0/(numpy.array(Dsa_ss)*self.c_n_a) )
self.D_cs_c = numpy.diag( -1.0/(numpy.array(Dsc_ss)*self.c_n_c) )
## Define c_e functions
def build_Ace_mat( self, c ) :
D_eff = self.Diff_ce( c )
A = self.K_m.dot( flux_mat_builder( self.N, self.x_m, self.vols, D_eff ) )
return A
def Diff_ce( self, c ) :
T = self.T
# D_ce = 1e-4 * 10.0**( -4.43 - (54./(T-229.-5e-3*c)) - (0.22e-3*c) ) ## Torchio (LIONSIMBA) ECS paper
D_ce = self.De_intp( c, T, grid=False ).flatten()
D_mid = D_ce * self.eps_eff
if type(c) == float :
D_edge = D_mid
else :
D_edge = mid_to_edge( D_mid, self.x_e )
return D_edge
## Define phi_e functions
def build_Ape_mat( self, c ) :
k_eff = self.kapp_ce( c )
A = flux_mat_builder( self.N, self.x_m, self.vols, k_eff )
A[-1,-1] = 2*A[-1,-1] # BC update for phi_e = 0
return A
def build_Bpe_mat( self, c ) :
gam = 2.*(1.-self.t_plus)*self.R_gas*self.T / self.F
k_eff = self.kapp_ce( c )
c_edge = mid_to_edge( c, self.x_e )
B1 = flux_mat_builder( self.N, self.x_m, self.vols, k_eff*gam/c_edge )
return B1
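    # In build_Bpe_mat above, gam = 2*(1-t_plus)*R_gas*T/F is the concentrated-solution
    # prefactor that couples the electrolyte concentration (through a d(ln ce)/dx type term)
    # into the electrolyte potential equation; any activity-coefficient correction
    # (1 + dln f/dln c) appears to be neglected here. B_pe multiplies ce in residual r6.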
def kapp_ce( self, c, mid_on=0 ) :
T = self.T
# k_ce = 1e-4 * c *( -10.5 +0.668e-3*c + 0.494e-6*c**2
# + (0.074 - 1.78*1e-5*c - 8.86e-10*c**2)*T
# + (-6.96e-5 + 2.8e-8*c)*T**2 )**2 ## Torchio (LIONSIMBA) ECS paper
k_ce = 1e-1*self.ke_intp( c, T, grid=False ).flatten() # 1e-1 converts from mS/cm to S/m (model uses SI units)
k_mid = k_ce * self.eps_eff
if mid_on :
k_out = k_mid
else :
if type(c) == float :
k_out = k_mid
else :
k_out = mid_to_edge( k_mid, self.x_e )
        return k_out
def build_Bjac_mat( self, eta, a, b ) :
d = a*numpy.cosh( b*eta )*b
return numpy.diag( d )
def get_voltage( self, y ) :
"""
Return the cell potential
"""
pc = y[self.pc_inds]
pa = y[self.pa_inds]
Vcell = pc[-1] - pa[0]
return Vcell
def calc_heat( self, ce, csa, csc, ja, jc, phi, phi_s_a, phi_s_c, eta_a, eta_c ) :
"""
Return the total integrated heat source across the cell sandwich
"""
# Gradients for heat calc
dphi_s_a = numpy.gradient( phi_s_a ) / numpy.gradient( self.x_m_a )
dphi_s_c = numpy.gradient( phi_s_c ) / numpy.gradient( self.x_m_c )
dphi = numpy.gradient( phi ) / numpy.gradient( self.x_m )
dlnce = 1./ce * ( numpy.gradient(ce) / numpy.gradient( self.x_m ) )
        kapp_eff_m = self.kapp_ce( ce, mid_on=1 ) # kapp_eff at the node points (middle of control volume, rather than edge)
K = numpy.diag(kapp_eff_m)
dp = self.G.dot(phi)
# Reaction kinetics heat
Q_rxn_a = sum( (self.F*self.as_a*ja*eta_a)*self.vols_a )
Q_rxn_c = sum( (self.F*self.as_c*jc*eta_c)*self.vols_c )
Q_rxn = Q_rxn_a + Q_rxn_c
# Ohmic heat in electrolyte and solid
        Q_ohm_e = sum( ( kapp_eff_m*(dphi)**2 + (2*kapp_eff_m*self.R_gas*self.T/self.F*(1-self.t_plus))*dlnce*dphi )*self.vols )
Q_ohm_s = sum( (self.sig_a_eff*(dphi_s_a)**2)*self.vols_a ) + sum( (self.sig_c_eff*(dphi_s_c)**2)*self.vols_c )
Q_ohm = Q_ohm_e + Q_ohm_s
# Entropic heat
## ??
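        # The entropic (dU/dT) contribution is not modeled here, so Q_tot below only
        # includes the reaction and ohmic heat terms computed above.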
# Total heat
Q_tot = Q_ohm + Q_rxn
return Q_tot
## Define system equations
def res( self, t, y, yd ) :
## Parse out the states
# E-lyte conc
ce = y[ self.ce_inds]
c_dots = yd[self.ce_inds]
# Solid conc a:anode, c:cathode
csa = y[ self.csa_inds]
csc = y[ self.csc_inds]
csa_dt = yd[self.csa_inds]
csc_dt = yd[self.csc_inds]
# Reaction (Butler-Volmer Kinetics)
ja_rxn = y[self.ja_inds]
jc_rxn = y[self.jc_inds]
# E-lyte potential
phi = y[self.pe_inds]
# Solid potential
phi_s_a = y[self.pa_inds]
phi_s_c = y[self.pc_inds]
## Grab state dependent matrices
# For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
# For Solid conc Ds
csa_ss = (self.C_cs_a.dot(csa)).flatten() + (self.D_cs_a.dot(ja_rxn)).flatten()
csc_ss = (self.C_cs_c.dot(csc)).flatten() + (self.D_cs_c.dot(jc_rxn)).flatten()
csa_o = (self.C_cso_a.dot(csa)).flatten()
csc_o = (self.C_cso_c.dot(csc)).flatten()
self.update_cs_mats( csa, csc, csa_ss, csc_ss, csa_o, csc_o )
## Compute extra variables
# For the reaction kinetics
Uref_a = self.uref_a( csa_ss/self.csa_max ) # anode equilibrium potential
Uref_c = self.uref_c( csc_ss/self.csc_max ) # cathode equilibrium potential
eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
# ja = 2.0*self.io_a * numpy.sqrt( ce[:self.Na]/self.ce_nom * (1.0 - csa_ss/self.csa_max) * (csa_ss/self.csa_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_a )
# jc = 2.0*self.io_c * numpy.sqrt( ce[-self.Nc:]/self.ce_nom * (1.0 - csc_ss/self.csc_max) * (csc_ss/self.csc_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_c )
ja = 2.0*self.io_a/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_a )
jc = 2.0*self.io_c/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_c )
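        # The active ja/jc expressions use a symmetric Butler-Volmer form (alpha = 0.5)
        # with constant exchange current densities io_a, io_c, i.e.
        #   j = (2*io/F)*sinh( 0.5*F*eta/(R_gas*T) )
        # The commented lines above show the concentration-dependent variant they replace.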
j = numpy.concatenate( [ ja_rxn, numpy.zeros(self.Ns), jc_rxn ] )
## Compute the residuals
# Time deriv components
r1 = c_dots - ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
r2 = csa_dt - (self.A_cs_a.dot(csa).flatten() + self.B_cs_a.dot(ja_rxn).flatten()) # Anode conc
r3 = csc_dt - (self.A_cs_c.dot(csc).flatten() + self.B_cs_c.dot(jc_rxn).flatten()) # Cathode conc
# Algebraic components
r4 = ja_rxn - ja
r5 = jc_rxn - jc
r6 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
r7 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja_rxn).flatten() - self.B2_ps_a*self.i_app # Anode potential
r8 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc_rxn).flatten() + self.B2_ps_c*self.i_app # Cathode potential
res_out = numpy.concatenate( [r1, r2, r3, r4, r5, r6, r7, r8] )
return res_out
def jac( self, c, t, y, yd ) :
### Setup
## Parse out the states
# E-lyte conc
ce = y[ self.ce_inds]
c_dots = yd[self.ce_inds]
# Solid conc a:anode, c:cathode
csa = y[ self.csa_inds]
csc = y[ self.csc_inds]
csa_dt = yd[self.csa_inds]
csc_dt = yd[self.csc_inds]
# Reaction (Butler-Volmer Kinetics)
ja_rxn = y[self.ja_inds]
jc_rxn = y[self.jc_inds]
# E-lyte potential
phi = y[self.pe_inds]
# Solid potential
phi_s_a = y[self.pa_inds]
phi_s_c = y[self.pc_inds]
## Grab state dependent matrices
# For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
## Compute extra variables
# For the reaction kinetics
# csa_ss = numpy.array( [ csa[(i+1)*(self.Nra)-1] for i in range(self.Na) ] )
# csc_ss = numpy.array( [ csc[(i+1)*(self.Nrc)-1] for i in range(self.Nc) ] )
csa_ss = (self.C_cs_a.dot(csa)).flatten() + (self.D_cs_a.dot(ja_rxn)).flatten()
csc_ss = (self.C_cs_c.dot(csc)).flatten() + (self.D_cs_c.dot(jc_rxn)).flatten()
Uref_a = self.uref_a( csa_ss/self.csa_max ) # anode equilibrium potential
Uref_c = self.uref_c( csc_ss/self.csc_max ) # cathode equilibrium potential
eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
###
### Build the Jac matrix
## Self coupling
A_dots = numpy.diag( [1*c for i in range(self.num_diff_vars)] )
j_c = A_dots - scipy.linalg.block_diag( A_ce, self.A_cs_a, self.A_cs_c )
Bjac_a = self.build_Bjac_mat( eta_a, 2.0*self.io_a/self.F, 0.5*self.F/(self.R_gas*self.T) )
Bjac_c = self.build_Bjac_mat( eta_c, 2.0*self.io_c/self.F, 0.5*self.F/(self.R_gas*self.T) )
DUDcsa_ss = numpy.diag( (1.0/self.csa_max)*self.duref_a(csa_ss/self.csa_max) )
DUDcsc_ss = numpy.diag( (1.0/self.csc_max)*self.duref_c(csc_ss/self.csc_max) )
A_ja = numpy.diag(numpy.ones(self.Na)) - (Bjac_a.dot(-1.0*DUDcsa_ss*1.0)).dot( self.D_cs_a )
A_jc = numpy.diag(numpy.ones(self.Nc)) - (Bjac_c.dot(-1.0*DUDcsc_ss*1.0)).dot( self.D_cs_c )
j = scipy.linalg.block_diag( j_c, A_ja, A_jc, A_pe, self.A_ps_a, self.A_ps_c )
## Cross coupling
# c_e: j coupling back in
j[ numpy.ix_(self.ce_inds, self.ja_inds) ] = -self.B_ce[:, :self.Na ]
j[ numpy.ix_(self.ce_inds, self.jc_inds) ] = -self.B_ce[:, -self.Nc:]
# cs_a: j coupling
j[ numpy.ix_(self.csa_inds, self.ja_inds) ] = -self.B_cs_a
# cs_c: j coupling
j[ numpy.ix_(self.csc_inds, self.jc_inds) ] = -self.B_cs_c
# T
# j_a: pe, pa, csa coupling
j[numpy.ix_(self.ja_inds, self.pa_inds )] = -Bjac_a*( 1.0)
j[numpy.ix_(self.ja_inds, self.pe_a_inds)] = -Bjac_a*(-1.0)
j[numpy.ix_(self.ja_inds, self.csa_inds )] = -(Bjac_a.dot(-1.0*DUDcsa_ss*1.0)).dot( self.C_cs_a )
# j_c: pe, pc, csc coupling
j[numpy.ix_(self.jc_inds, self.pc_inds )] = -Bjac_c*( 1.0)
j[numpy.ix_(self.jc_inds, self.pe_c_inds)] = -Bjac_c*(-1.0)
j[numpy.ix_(self.jc_inds, self.csc_inds )] = -(Bjac_c.dot(-1.0*DUDcsc_ss*1.0)).dot( self.C_cs_c )
# phi_e: ce coupling into phi_e equation
j[numpy.ix_(self.pe_inds,self.ce_inds)] = -B_pe
j[numpy.ix_(self.pe_inds,self.ja_inds)] = self.B2_pe[:,:self.Na]
j[numpy.ix_(self.pe_inds,self.jc_inds)] = self.B2_pe[:,-self.Nc:]
# phi_s_a: ja
j[numpy.ix_(self.pa_inds,self.ja_inds)] = -self.B_ps_a
# phi_s_c: jc
j[numpy.ix_(self.pc_inds,self.jc_inds)] = -self.B_ps_c
###
return j
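    # Jacobian block layout: rows/columns follow the index sets defined in __init__,
    # i.e. the differential states [ce, csa, csc] first, then the algebraic variables
    # [ja, jc, phi_e, phi_s_a, phi_s_c]; the numpy.ix_ assignments above fill in the
    # cross-coupling between reaction rates, potentials, and concentrations.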
csa_max = 30555.0 # [mol/m^3]
csc_max = 51554.0 # [mol/m^3]
#bsp_dir = '/home/m_klein/Projects/battsimpy/'
bsp_dir = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
#bsp_dir = '/Users/mk/Desktop/battsim/battsimpy/'
Ua_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_bigx.csv'
Uc_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_bigx.csv'
uref_a, uref_c, duref_a, duref_c = get_smooth_Uref_data( Ua_path, Uc_path, ffa=0.4, ffc=0.2 )
xa_init, xc_init = 0.8, 0.37
ca_init = xa_init*csa_max
cc_init = xc_init*csc_max
Ua_init = uref_a( xa_init )
Uc_init = uref_c( xc_init )
print Ua_init
print Uc_init
### Mesh
La = 65.0
Ls = 25.0
Lc = 55.0
Lt = (La+Ls+Lc)
X = Lt*1e-6 # [m]
N = 80
Ns = int(N*(Ls/Lt))
Na = int(N*(La/Lt))
Nc = N - Ns - Na
print 'Na, Ns, Nc:', Na, Ns, Nc
Nra = 10
Nrc = 15
Ra = 12.0e-6
Rc = 6.5e-6
Crate = 3.
Vcut = 3.0 # [V], cutoff voltage for end of discharge
ce_lims = [50.,3700.]
cell_coated_area = 1.0 # [m^2]
cell_cap = 29.0
I_app = Crate*cell_cap # A
#i_app = I_app / cell_coated_area # current density, [A/m^2]
### Initial conditions
# E-lyte conc
c_init = 1100.0 # [mol/m^3]
c_centered = c_init*numpy.ones( N, dtype='d' )
# E-lyte potential
p_init = 0.0 # [V]
p_centered = p_init*numpy.ones( N, dtype='d' )
# Solid potential on anode and cathode
pa_init = Ua_init #0.0 # [V]
pa_centered = pa_init*numpy.ones( Na, dtype='d' )
pc_init = Uc_init #0.0 # [V]
pc_centered = pc_init*numpy.ones( Nc, dtype='d' )
# Solid conc on anode and cathode
ca_centered = ca_init*numpy.ones( Na*Nra, dtype='d' )
cc_centered = cc_init*numpy.ones( Nc*Nrc, dtype='d' )
# j init
ja = numpy.zeros(Na)
jc = numpy.zeros(Nc)
num_diff_vars = len(c_centered)+len(ca_centered)+len(cc_centered)
num_algr_vars = len(ja)+len(jc)+len(p_centered)+len(pa_centered)+len(pc_centered)
#The initial conditions
y0 = numpy.concatenate( [c_centered, ca_centered, cc_centered, ja, jc, p_centered, pa_centered, pc_centered] ) #Initial conditions
yd0 = [0.0 for i in range(len(y0))] #Initial conditions
#Create an Assimulo implicit problem
imp_mod = MyProblem(Na,Ns,Nc,Nra,Nrc,X,Ra,Rc,cell_coated_area,bsp_dir,y0,yd0,'Example using an analytic Jacobian')
#Sets the options to the problem
imp_mod.algvar = [1.0 for i in range(num_diff_vars)] + [0.0 for i in range(num_algr_vars)] #Set the algebraic components
#Create an Assimulo implicit solver (IDA)
imp_sim = IDA(imp_mod) #Create an IDA solver
#Sets the parameters
imp_sim.atol = 1e-5 #Default 1e-6
imp_sim.rtol = 1e-5 #Default 1e-6
imp_sim.suppress_alg = True #Suppress the algebraic variables on the error test
imp_sim.display_progress = False
imp_sim.verbosity = 50
imp_sim.report_continuously = True
imp_sim.time_limit = 10.
### Simulate
t01, t02 = 0.1, 0.2
imp_mod.set_iapp( I_app/10. )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
ta, ya, yda = imp_sim.simulate(t01,2)
imp_mod.set_iapp( I_app/2. )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
tb, yb, ydb = imp_sim.simulate(t02,2)
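# The two short low-current steps above (I_app/10 then I_app/2, each followed by a
# make_consistent call) appear to act as a soft start so that IDA can find consistent
# initial conditions before the full C-rate current is applied below.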
# Sim step 1
#imp_mod.set_iapp( I_app )
#imp_sim.make_consistent('IDA_YA_YDP_INIT')
#t1, y1, yd1 = imp_sim.simulate(1.0/Crate*3600.0,100)
NT = 100
time = numpy.linspace( t02+0.1, 1.0/Crate*3600.0, NT )
t_out = [ 0 for ts in time ]
V_out = [ 0 for ts in time ]
y_out = numpy.zeros( [len(time), yb.shape[ 1]] )
yd_out = numpy.zeros( [len(time), ydb.shape[1]] )
it = 0
V_cell = imp_mod.get_voltage( yb[-1,:].flatten() )
ce_now = yb[-1,imp_mod.ce_inds].flatten()
print 'V_cell prior to time loop:', V_cell
imp_mod.set_iapp( I_app )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
sim_stopped = 0
while V_cell > Vcut and max(ce_now)<max(ce_lims) and min(ce_now)>min(ce_lims) and not sim_stopped and it<len(time) :
try :
ti, yi, ydi = imp_sim.simulate(time[it],1)
except :
ti = [t_out[it-1],t_out[it-1]]
yi = y_out[ it-2:it,:]
ydi = yd_out[ it-2:it,:]
sim_stopped = 1
        print 'Sim stopped due to time integration failure.'
t_out[ it] = ti[ -1 ]
y_out[ it,:] = yi[ -1,:]
yd_out[it,:] = ydi[-1,:]
V_cell = imp_mod.get_voltage( y_out[it,:] )
V_out[it] = V_cell
ce_now = y_out[it,imp_mod.ce_inds]
print 'time:',round(t_out[it],3), ' | Voltage:', round(V_cell,3)
if V_cell < Vcut :
print '\n','Vcut stopped simulation.'
elif max(ce_now)>max(ce_lims) :
print '\n','ce max stopped simulation.'
elif min(ce_now)<min(ce_lims) :
print '\n','ce min stopped simulation.'
it+=1
if it < len(time) :
t_out = t_out[ :it ]
V_out = V_out[ :it ]
y_out = y_out[ :it,:]
yd_out = yd_out[:it,:]
ce = y_out[:,imp_mod.ce_inds]
f,ax=plt.subplots(1,2)
ax[0].plot( imp_mod.x_m, ce.T )
ax[1].plot( t_out, V_out )
plt.show()
t1 = t_out
y1 = y_out
yd1 = yd_out
print t_out[it-1]
# Sim step 2
imp_mod.set_iapp( 0.0 )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
t2, y2, yd2 = imp_sim.simulate(t_out[-1]*1.5,100)
plot_on = 1
if plot_on :
# extract variables
im = imp_mod
ce_1 = y1[:,im.ce_inds]
ca_1 = y1[:,im.csa_inds]
cc_1 = y1[:,im.csc_inds]
ca1_r = [ numpy.reshape( ca_1[it,:], (im.Na, im.Nra) ) for it in range(len(t1)) ]
cc1_r = [ numpy.reshape( cc_1[it,:], (im.Nc, im.Nrc) ) for it in range(len(t1)) ]
pe_1 = y1[:,im.pe_inds]
pa_1 = y1[:,im.pa_inds]
pc_1 = y1[:,im.pc_inds]
ja_1 = y1[:,im.ja_inds]
jc_1 = y1[:,im.jc_inds]
ce_2 = y2[:,im.ce_inds]
ca_2 = y2[:,im.csa_inds]
cc_2 = y2[:,im.csc_inds]
ca2_r = [ numpy.reshape( ca_2[it,:], (im.Na, im.Nra) ) for it in range(len(t2)) ]
cc2_r = [ numpy.reshape( cc_2[it,:], (im.Nc, im.Nrc) ) for it in range(len(t2)) ]
pe_2 = y2[:,im.pe_inds]
pa_2 = y2[:,im.pa_inds]
pc_2 = y2[:,im.pc_inds]
ja_2 = y2[:,im.ja_inds]
jc_2 = y2[:,im.jc_inds]
#Plot
# t1
# Plot through space
f, ax = plt.subplots(2,4)
# ce vs x
ax[0,0].plot(imp_mod.x_m*1e6,ce_1.T)
# pe vs x
ax[0,1].plot(imp_mod.x_m*1e6,pe_1.T)
# pa vs x
ax[0,2].plot(imp_mod.x_m_a*1e6,pa_1.T)
# pc vs x
ax[0,2].plot(imp_mod.x_m_c*1e6,pc_1.T)
ax[0,0].set_title('t1 c')
ax[0,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0,1].set_title('t1 p')
ax[0,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,1].set_ylabel('E-lyte Potential [V]')
ax[0,2].set_title('t1 p solid')
ax[0,2].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,2].set_ylabel('Solid Potential [V]')
#ax[0,3].set_title('t1 conc solid')
#ax[0,3].set_xlabel('Cell Thickness [$\mu$m]')
#ax[0,3].set_ylabel('Solid Conc. [mol/m$^3$]')
# t2
ax[1,0].plot(imp_mod.x_m*1e6,ce_2.T)
ax[1,1].plot(imp_mod.x_m*1e6,pe_2.T)
ax[1,2].plot(imp_mod.x_m_a*1e6,pa_2.T)
ax[1,2].plot(imp_mod.x_m_c*1e6,pc_2.T)
ax[1,0].set_title('t2 c')
ax[1,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[1,1].set_title('t2 p e-lyte')
ax[1,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,1].set_ylabel('E-lyte Potential [V]')
ax[1,2].set_title('t2 p solid')
ax[1,2].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,2].set_ylabel('Solid Potential [V]')
#ax[1,3].set_title('t2 Solid Conc.')
#ax[1,3].set_xlabel('Cell Thickness [$\mu$m]')
#ax[1,3].set_ylabel('Solid Conc. [mol/m$^3$]')
plt.tight_layout()
fcs, ax = plt.subplots(1,2)
ira, irc = im.Nra-1, im.Nrc-1
for it in range(len(t1)) :
# ca vs x
ax[0].plot(imp_mod.x_m_a*1e6, ca1_r[it][:,ira])
# cc vs x
ax[0].plot(imp_mod.x_m_c*1e6, cc1_r[it][:,irc])
for it in range(len(t1)) :
ax[1].plot(imp_mod.x_m_a*1e6, ca2_r[it][:,ira])
ax[1].plot(imp_mod.x_m_c*1e6, cc2_r[it][:,irc])
ax[0].set_title('t1 Solid Conc.')
ax[1].set_title('t2 Solid Conc.')
ax[0].set_xlabel('Cell Thickness [$\mu$m]')
ax[0].set_ylabel('Solid Conc. [mol/m$^3$]')
plt.tight_layout()
fcsr, ax = plt.subplots(1,2)
ixa, ixc = im.Na-1, 0
for it in range(len(t1)) :
# ca vs x
ax[0].plot(imp_mod.r_m_a*1e6, ca1_r[it][ixa,:])
# cc vs x
ax[0].plot(imp_mod.r_m_c*1e6, cc1_r[it][ixc,:])
for it in range(len(t1)) :
ax[1].plot(imp_mod.r_m_a*1e6, ca2_r[it][ixa,:])
ax[1].plot(imp_mod.r_m_c*1e6, cc2_r[it][ixc,:])
ax[0].set_title('t1 Solid Conc.')
ax[1].set_title('t2 Solid Conc.')
ax[0].set_xlabel('Cell Thickness [$\mu$m]')
ax[0].set_ylabel('Solid Conc. [mol/m$^3$]')
plt.tight_layout()
# Plot through time
f, ax = plt.subplots(1,3)
ax[0].plot(t1,ce_1)
ax[1].plot(t1,pe_1)
ax[2].plot(t1,pa_1)
ax[2].plot(t1,pc_1)
#ax[3].plot(t1,ca_1)
#ax[3].plot(t1,cc_1)
ax[0].plot(t2,ce_2)
ax[1].plot(t2,pe_2)
ax[2].plot(t2,pa_2)
ax[2].plot(t2,pc_2)
#ax[3].plot(t2,ca_2)
#ax[3].plot(t2,cc_2)
ax[0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0].set_xlabel('Time [s]')
ax[1].set_ylabel('E-lyte Potential [V]')
ax[1].set_xlabel('Time [s]')
ax[2].set_ylabel('Solid Potential [V]')
ax[2].set_xlabel('Time [s]')
#ax[3].set_ylabel('Solid Conc. [mol/m$^3$]')
#ax[3].set_xlabel('Time [s]')
plt.tight_layout()
plt.figure()
plt.plot( t1, pc_1[:,-1] - pa_1[:,0] )
plt.plot( t2, pc_2[:,-1] - pa_2[:,0] )
plt.show()
#
#
#
#imp_mod = MyProblem(Na,Ns,Nc,Nra,Nrc,X,Ra,Rc,cell_coated_area,bsp_dir,y0,yd0,'Example using an analytic Jacobian')
#
## my own time solver
#
#delta_t = 1.0
#tf = 10.
#time = [ i*delta_t for i in range(int(tf/delta_t)+1) ]
#
#print time
#
#x_out = numpy.zeros( [num_diff_vars, len(time)] )
#z_out = numpy.zeros( [num_algr_vars, len(time)] )
#
#x_out[:,0] = numpy.concatenate( [c_centered, ca_centered, cc_centered] )
#z_out[:,0] = numpy.concatenate( [ja, jc, p_centered, pa_centered, pc_centered] )
#
#for it, t in enumerate(time[1:]) :
#
# if it == 0 :
# Cur_vec = [ 0.0, 0.0, 0.1*I_app ]
# elif it == 1 :
# Cur_vec = [ 0.0, 0.1*I_app, 0.5*I_app ]
# elif it == 2 :
# Cur_vec = [ 0.1*I_app, 0.5*I_app, I_app ]
# elif it == 3 :
# Cur_vec = [ 0.5*I_app, I_app, I_app ]
# else :
# Cur_vec = [ I_app, I_app, I_app ]
#
# x_out[:,it+1], z_out[:,it+1], newtonStats = imp_mod.cn_solver( x_out[:,it], z_out[:,it], Cur_vec, delta_t )
#
#plt.close()
#f, ax = plt.subplots(1,3)
#ax[0].plot( imp_mod.x_m, x_out[:imp_mod.N] )
#
#ax[1].plot( imp_mod.x_m, z_out[imp_mod.Na+imp_mod.Nc:imp_mod.Na+imp_mod.Nc+imp_mod.N,:-1] )
#
#ax[2].plot( imp_mod.x_m_a, z_out[-imp_mod.Na-imp_mod.Nc:-imp_mod.Nc,:-1] )
#ax[2].plot( imp_mod.x_m_c, z_out[-imp_mod.Nc:,:-1] )
#plt.show()
#
#print z_out
#
#
# def dae_system( self, x, z, Input, get_mats=0 ) :
#
# self.set_iapp( Input )
#
# y = numpy.concatenate([x,z])
#
# ## Parse out the states
# # E-lyte conc
# ce = y[ self.ce_inds]
#
# # Solid conc a:anode, c:cathode
# csa = y[ self.csa_inds]
# csc = y[ self.csc_inds]
#
# # Reaction (Butler-Volmer Kinetics)
# ja_rxn = y[self.ja_inds]
# jc_rxn = y[self.jc_inds]
#
# # E-lyte potential
# phi = y[self.pe_inds]
#
# # Solid potential
# phi_s_a = y[self.pa_inds]
# phi_s_c = y[self.pc_inds]
#
# ## Grab state dependent matrices
# # For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
# A_ce = self.build_Ace_mat( ce )
# A_pe = self.build_Ape_mat( ce )
# B_pe = self.build_Bpe_mat( ce )
#
# ## Compute extra variables
# # For the reaction kinetics
## csa_ss = numpy.array( [ csa[(i+1)*(self.Nra)-1] for i in range(self.Na) ] )
## csc_ss = numpy.array( [ csc[(i+1)*(self.Nrc)-1] for i in range(self.Nc) ] )
# csa_ss = (self.C_cs_a.dot(csa)).flatten() + (self.D_cs_a.dot(ja_rxn)).flatten()
# csc_ss = (self.C_cs_c.dot(csc)).flatten() + (self.D_cs_c.dot(jc_rxn)).flatten()
#
# xa = csa /self.csa_max
# xc = csc /self.csc_max
# xa_ss = csa_ss/self.csa_max
# xc_ss = csc_ss/self.csc_max
#
# Uref_a = self.uref_a( xa_ss ) # anode equilibrium potential
# Uref_c = self.uref_c( xc_ss ) # cathode equilibrium potential
#
# eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
# eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
#
## ja = 2.0*self.io_a * numpy.sqrt( ce[:self.Na]/self.ce_nom * (1.0 - csa_ss/self.csa_max) * (csa_ss/self.csa_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_a )
## jc = 2.0*self.io_c * numpy.sqrt( ce[-self.Nc:]/self.ce_nom * (1.0 - csc_ss/self.csc_max) * (csc_ss/self.csc_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_c )
# ja = 2.0*self.io_a/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_a )
# jc = 2.0*self.io_c/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_c )
#
# j = numpy.concatenate( [ ja_rxn, numpy.zeros(self.Ns), jc_rxn ] )
#
# ## Compute the residuals
# # Time deriv components
# r1 = ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
#
# r2 = ( (self.A_cs_a.dot(csa)).flatten() + (self.B_cs_a.dot(ja_rxn)).flatten() ) # Anode conc
# r3 = ( (self.A_cs_c.dot(csc)).flatten() + (self.B_cs_c.dot(jc_rxn)).flatten() ) # Cathode conc
#
# # Algebraic components
# r4 = ja_rxn - ja
# r5 = jc_rxn - jc
#
# r6 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
#
# r7 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja_rxn).flatten() - self.B2_ps_a*self.i_app # Anode potential
# r8 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc_rxn).flatten() + self.B2_ps_c*self.i_app # Cathode potential
#
# if get_mats :
# res_out = numpy.concatenate( [r1,r2,r3] ), numpy.concatenate( [r4, r5, r6, r7, r8] ), { 'A_ce':A_ce, 'A_pe':A_pe, 'B_pe':B_pe, 'csa':csa, 'csc':csc, 'csa_ss':csa_ss, 'csc_ss':csc_ss, 'xa':xa, 'xc':xc, 'xa_ss':xa_ss, 'xc_ss':xc_ss, 'eta_a':eta_a, 'eta_c':eta_c }
# else :
# res_out = numpy.concatenate( [r1,r2,r3] ), numpy.concatenate( [r4, r5, r6, r7, r8] )
#
# return res_out
#
# def dae_system_num( self, y ) :
#
# ## Parse out the states
# # E-lyte conc
# ce = y[ self.ce_inds]
#
# # Solid conc a:anode, c:cathode
# csa = y[ self.csa_inds]
# csc = y[ self.csc_inds]
#
# # Reaction (Butler-Volmer Kinetics)
# ja_rxn = y[self.ja_inds]
# jc_rxn = y[self.jc_inds]
#
# # E-lyte potential
# phi = y[self.pe_inds]
#
# # Solid potential
# phi_s_a = y[self.pa_inds]
# phi_s_c = y[self.pc_inds]
#
# ## Grab state dependent matrices
# # For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
# A_ce = self.build_Ace_mat( ce )
# A_pe = self.build_Ape_mat( ce )
# B_pe = self.build_Bpe_mat( ce )
#
# ## Compute extra variables
# # For the reaction kinetics
## csa_ss = numpy.array( [ csa[(i+1)*(self.Nra)-1] for i in range(self.Na) ] )
## csc_ss = numpy.array( [ csc[(i+1)*(self.Nrc)-1] for i in range(self.Nc) ] )
# csa_ss = (self.C_cs_a.dot(csa)).flatten() + (self.D_cs_a.dot(ja_rxn)).flatten()
# csc_ss = (self.C_cs_c.dot(csc)).flatten() + (self.D_cs_c.dot(jc_rxn)).flatten()
#
# xa = csa /self.csa_max
# xc = csc /self.csc_max
# xa_ss = csa_ss/self.csa_max
# xc_ss = csc_ss/self.csc_max
#
# Uref_a = self.uref_a( xa_ss ) # anode equilibrium potential
# Uref_c = self.uref_c( xc_ss ) # cathode equilibrium potential
#
# eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
# eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
#
## ja = 2.0*self.io_a * numpy.sqrt( ce[:self.Na]/self.ce_nom * (1.0 - csa_ss/self.csa_max) * (csa_ss/self.csa_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_a )
## jc = 2.0*self.io_c * numpy.sqrt( ce[-self.Nc:]/self.ce_nom * (1.0 - csc_ss/self.csc_max) * (csc_ss/self.csc_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_c )
# ja = 2.0*self.io_a/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_a )
# jc = 2.0*self.io_c/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_c )
#
# j = numpy.concatenate( [ ja_rxn, numpy.zeros(self.Ns), jc_rxn ] )
#
# ## Compute the residuals
# # Time deriv components
# r1 = ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
#
# r2 = ( (self.A_cs_a.dot(csa)).flatten() + (self.B_cs_a.dot(ja_rxn)).flatten() ) # Anode conc
# r3 = ( (self.A_cs_c.dot(csc)).flatten() + (self.B_cs_c.dot(jc_rxn)).flatten() ) # Cathode conc
#
# # Algebraic components
# r4 = ja_rxn - ja
# r5 = jc_rxn - jc
#
# r6 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
#
# r7 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja_rxn).flatten() - self.B2_ps_a*self.i_app # Anode potential
# r8 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc_rxn).flatten() + self.B2_ps_c*self.i_app # Cathode potential
#
# res_out = numpy.concatenate( [r1,r2,r3, r4, r5, r6, r7, r8] )
#
# return res_out
#
#
# def jac_system( self, mats ) :
#
# A_ce = mats['A_ce']
# A_pe = mats['A_pe']
# B_pe = mats['B_pe']
#
# Bjac_a = self.build_Bjac_mat( mats['eta_a'], 2.0*self.io_a/self.F, 0.5*self.F/(self.R_gas*self.T) )
# Bjac_c = self.build_Bjac_mat( mats['eta_c'], 2.0*self.io_c/self.F, 0.5*self.F/(self.R_gas*self.T) )
#
# DUDcsa_ss = numpy.diag( (1.0/self.csa_max)*self.duref_a(mats['xa_ss']) )
# DUDcsc_ss = numpy.diag( (1.0/self.csc_max)*self.duref_c(mats['xc_ss']) )
#
# A_ja = numpy.diag(numpy.ones(self.Na)) - (Bjac_a.dot(-1.0*DUDcsa_ss*1.0)).dot( self.D_cs_a )
# A_jc = numpy.diag(numpy.ones(self.Nc)) - (Bjac_c.dot(-1.0*DUDcsc_ss*1.0)).dot( self.D_cs_c )
#
# ## fx
# fx = scipy.linalg.block_diag( A_ce, self.A_cs_a, self.A_cs_c )
# ##
#
# ## fz
# fz = numpy.zeros( [self.num_diff_vars, self.num_algr_vars] )
# # ce vs j
# fz[ numpy.ix_(self.ce_inds, self.ja_inds2) ] = self.B_ce[:, :self.Na ]
# fz[ numpy.ix_(self.ce_inds, self.jc_inds2) ] = self.B_ce[:, -self.Nc:]
# # cs vs j
# fz[ numpy.ix_(self.csa_inds, self.ja_inds2) ] = self.B_cs_a
# fz[ numpy.ix_(self.csc_inds, self.jc_inds2) ] = self.B_cs_c
# ##
#
# ## gx
# gx = numpy.zeros( [self.num_algr_vars, self.num_diff_vars] )
# # j vs cs_ss
# gx[ numpy.ix_(self.ja_inds2, self.csa_inds) ] = -(Bjac_a.dot(-1.0*DUDcsa_ss*1.0)).dot(self.C_cs_a)
# gx[ numpy.ix_(self.jc_inds2, self.csc_inds) ] = -(Bjac_c.dot(-1.0*DUDcsc_ss*1.0)).dot(self.C_cs_c)
# # phi_e vs ce
# gx[ numpy.ix_(self.pe_inds2, self.ce_inds) ] = -B_pe
# ##
#
# ## gz
# # z vs z
# gz0 = scipy.linalg.block_diag( A_ja, A_jc, A_pe, self.A_ps_a, self.A_ps_c )
# # z cross coupling
# gz00 = numpy.zeros_like( gz0 )
# # phi_e vs j
# gz00[ numpy.ix_(self.pe_inds2, self.ja_inds2) ] = self.B2_pe[:,:self.Na]
# gz00[ numpy.ix_(self.pe_inds2, self.jc_inds2) ] = self.B2_pe[:,-self.Nc:]
# # phi_s vs j
# gz00[ numpy.ix_(self.pa_inds2, self.ja_inds2) ] = -self.B_ps_a
# gz00[ numpy.ix_(self.pc_inds2, self.jc_inds2) ] = -self.B_ps_c
# # j vs phi_s
# gz00[ numpy.ix_(self.ja_inds2, self.pa_inds2) ] = -Bjac_a*( 1.0)
# gz00[ numpy.ix_(self.jc_inds2, self.pc_inds2) ] = -Bjac_c*( 1.0)
# # j vs phi_e
# gz00[ numpy.ix_(self.ja_inds2, self.pe_a_inds2) ] = -Bjac_a*(-1.0)
# gz00[ numpy.ix_(self.jc_inds2, self.pe_c_inds2) ] = -Bjac_c*(-1.0)
#
# gz = gz0 + gz00
#
# return fx, fz, gx, gz
#
#
# def cn_solver( self, x, z, Cur_vec, delta_t ) :
# """
# Crank-Nicholson solver for marching through time
# """
# Cur_prev, Cur, Cur_nxt = Cur_vec[0], Cur_vec[1], Cur_vec[2]
#
# maxIters = 20
# tol = 1e-5
#
# Nx = self.num_diff_vars
# Nz = self.num_algr_vars
#
# x_nxt = numpy.zeros( (Nx,maxIters), dtype='d' )
# z_nxt = numpy.zeros( (Nz,maxIters), dtype='d' )
#
# relres = numpy.zeros( maxIters, dtype='d' )
# relres[0] = 1.0
#
# var_flag = {'lim_on':0}
#
# # Solve for consistent ICs
# if Cur != Cur_prev :
# z_cons = numpy.zeros( (Nz, maxIters), dtype='d' )
# z_cons[:,0] = deepcopy(z)
#
# junk_f, g, mats = self.dae_system( x, z, Cur, get_mats=1 )
# for idx in range(maxIters-1) :
# (junk_fx, junk_fz, junk_gx, g_z) = self.jac_system( mats )
#
# Delta_z = -sparseSolve( sparseMat(g_z), g )
# z_cons[:,idx+1] = z_cons[:,idx] + Delta_z
#
# relres_z = numpy.linalg.norm(Delta_z,numpy.inf) / numpy.linalg.norm(z,numpy.inf)
# if relres_z < tol :
# break
# elif idx == maxIters-1 :
# print(('Warning: Max Newton iterations reached for consistency | RelChange=',relres_z*100.0))
#
# z = z_cons[:,idx+1]
#
# #print Cur
#
# f, g = self.dae_system( deepcopy(x), deepcopy(z), Cur )
#
# x_nxt[:,0] = deepcopy(x)
# z_nxt[:,0] = deepcopy(z)
#
# # plt.figure(1)
# # plt.plot( x_nxt[:,0] )
# # plt.plot( z_nxt[:,0] )
# # plt.show()
#
# for idx in range(maxIters-1) :
# f_nxt, g_nxt, mats = self.dae_system( x_nxt[:,idx], z_nxt[:,idx], Cur_nxt, get_mats=1 )
#
## print 'x:',x.shape
## print 'xnxt:',x_nxt[:,idx].shape
## print 'f:',f.shape
## print 'fnxt:',f_nxt.shape
#
## print 'z:', z.shape
## print 'g:', g.shape
## print 'znxt:', z_nxt[:,idx].shape
## print 'gnxt:', g_nxt.shape
#
# F1 = x - x_nxt[:,idx] + delta_t/2.*( f+f_nxt )
# F2 = g_nxt
# F = numpy.concatenate( (F1, F2), axis=0 )
#
# fx, fz, gx, gz = self.jac_system( mats )
#
#
# jmat = numpy.concatenate( (numpy.concatenate( (fx, fz), axis=1 ),
# numpy.concatenate( (gx, gz), axis=1 )) )
#
# self.Input = Cur_nxt
# jmat_num = compute_deriv( self.dae_system_num, numpy.concatenate( (x_nxt[:,idx], z_nxt[:,idx]) ) )
#
# fx_num = jmat_num[:self.num_diff_vars,:self.num_diff_vars]
# fz_num = jmat_num[:self.num_diff_vars,self.num_diff_vars:]
# gx_num = jmat_num[self.num_diff_vars:,:self.num_diff_vars]
# gz_num = jmat_num[self.num_diff_vars:,self.num_diff_vars:]
#
# F1x_num = -sparse.eye(len(x)) + delta_t/2. * fx_num
# F1z_num = delta_t/2. * fz_num
#
# F1_x = -sparse.eye(len(x)) + delta_t/2. * fx
# F1_z = delta_t/2. * fz
# F2_x = gx
# F2_z = gz
#
# J = numpy.concatenate( (numpy.concatenate( (F1_x, F1_z), axis=1 ),
# numpy.concatenate( (F2_x, F2_z), axis=1 )) )
#
## Jnum = numpy.concatenate( (numpy.concatenate( (F1x_num, F1z_num), axis=1 ),
## numpy.concatenate( (gx_num , gz_num ), axis=1 )) )
#
#
# Jsp = sparseMat( J )
#
## Jspnum = sparseMat( Jnum )
#
## Delta_y = -sparseSolve( Jspnum, F )
# Delta_y = -sparseSolve( Jsp, F )
#
#
# x_nxt[:,idx+1] = x_nxt[:,idx] + Delta_y[:Nx]
# z_nxt[:,idx+1] = z_nxt[:,idx] + Delta_y[Nx:]
#
# # plt.figure(1)
# # plt.plot(Delta_y)
#
# # plt.figure(2)
# # plt.plot(x_nxt[:,idx])
# # plt.plot(x_nxt[:,idx+1])
#
## plt.show()
#
# y = numpy.concatenate( (x_nxt[:,idx+1], z_nxt[:,idx+1]), axis=0 )
# relres[idx+1] = numpy.linalg.norm( Delta_y, numpy.inf ) / numpy.linalg.norm( y, numpy.inf )
#
# if (relres[idx+1]<tol) and (numpy.linalg.norm(F, numpy.inf)<tol) :
# break
# elif idx==maxIters-1 :
# print( ('Warning: Max Newton iterations reached in main CN loop | RelChange = ',relres[-1]*100.0) )
#
# x_nxtf = x_nxt[:,idx+1]
# z_nxtf = z_nxt[:,idx+1]
#
# newtonStats = {'var_flag':var_flag}
# newtonStats['iters'] = idx
# newtonStats['relres'] = relres
#
# print '###############################################'
# print 'numpy.allclose( fx, fx_num, rtol=0.001 ):', numpy.allclose( fx, fx_num, rtol=0.001 )
#
# print '###############################################'
# print 'numpy.allclose( fz, fz_num, rtol=0.001 ):', numpy.allclose( fz, fz_num, rtol=0.001 )
#
# print '###############################################'
# print 'numpy.allclose( gx, gx_num, rtol=0.001 ):', numpy.allclose( gx, gx_num, rtol=0.001 )
#
# print '###############################################'
# print 'numpy.allclose( gz, gz_num, rtol=0.001 ):', numpy.allclose( gz, gz_num, rtol=0.001 )
#
# print '###############################################'
# print 'numpy.allclose( jmat, jmat_num, rtol=0.001 ):', numpy.allclose( jmat, jmat_num, rtol=0.001 )
#
# jm1_sp = sps.csr_matrix(jmat)
# jm2_sp = sps.csr_matrix(jmat_num)
#
# fig, ax = plt.subplots(1,2)
# ax[0].spy( jm1_sp )
# ax[0].set_title('Analytical Jacobian')
# ax[1].spy( jm2_sp )
# ax[1].set_title('Numerical Jacobian')
# plt.suptitle( 'numpy.allclose( jmat, jmat_num, rtol=0.001 ):' + str(numpy.allclose( jmat, jmat_num, rtol=0.001 )) )
# plt.show()
#
# print 'Finished t_step'
#
# return x_nxtf, z_nxtf, newtonStats
|
gpl-3.0
|
cgroll/j_r_docker
|
plugin/ipythonPlugins/src/dist/python3/beaker_runtime.py
|
2
|
3001
|
# Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, urllib, urllib2, json, pandas, yaml, numpy
# should be an inner class of Beaker
class DataFrameEncoder(json.JSONEncoder):
def default(self, obj):
# similarly handle Panels.
# make this extensible by the user to handle their own types.
if type(obj) == pandas.core.frame.DataFrame:
return obj.to_dict(outtype='list')
if type(obj) == pandas.core.series.Series:
return obj.to_dict()
if isinstance(obj, numpy.ndarray) and obj.ndim == 1:
return obj.tolist()
if isinstance(obj, numpy.generic):
return obj.item()
return json.JSONEncoder.default(self, obj)
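# Hypothetical usage of the encoder (the DataFrame `df` is an assumption), matching
# how set4() below serializes values before posting them to the Beaker core:
#   payload = json.dumps(df, cls=DataFrameEncoder)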
class Beaker:
"""Runtime support for Python code in Beaker."""
session_id = ''
core_url = '127.0.0.1:' + os.environ['beaker_core_port']
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, core_url, 'beaker',
os.environ['beaker_core_password'])
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_mgr)))
def set4(self, var, val, unset, sync):
args = {'name': var, 'session':self.session_id, 'sync':sync}
if not unset:
args['value'] = json.dumps(val, cls=DataFrameEncoder)
req = urllib2.Request('http://' + self.core_url + '/rest/namespace/set',
urllib.urlencode(args))
conn = urllib2.urlopen(req)
reply = conn.read()
if reply != 'ok':
raise NameError(reply)
def get(self, var):
req = urllib2.Request('http://' + self.core_url + '/rest/namespace/get?' +
urllib.urlencode({'name': var, 'session':self.session_id}))
conn = urllib2.urlopen(req)
result = yaml.load(conn.read()) # would use json.loads but it returns unicode
if not result['defined']:
raise NameError('name \'' + var + '\' is not defined in notebook namespace')
return result['value']
def set_session(self, id):
self.session_id = id
def set(self, var, val):
return self.set4(var, val, False, True)
def __setattr__(self, name, value):
if 'session_id' == name:
self.__dict__['session_id'] = value
return
return self.set(name, value)
def __getattr__(self, name):
return self.get(name)
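# Hypothetical usage sketch from a notebook cell (names and session id are assumptions;
# the beaker_core_port/beaker_core_password environment variables must be set):
#   beaker = Beaker()
#   beaker.set_session(session_id)
#   beaker.x = 42          # routed through __setattr__ -> set() -> HTTP /rest/namespace/set
#   value = beaker.x       # routed through __getattr__ -> get() -> HTTP /rest/namespace/get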
|
mit
|
antiface/python-acoustics
|
acoustics/aio.py
|
2
|
3474
|
from __future__ import unicode_literals
import csv
import io
import re
try:
import pandas as pd
except ImportError:
raise Exception("Pandas is required to use this module.")
def read_csv_cirrus(filename):
"""Read a Cirrus CSV file. Currently exists support for some types of
CSV files extracted with NoiseTools. There is no support for CSVs related
with occupational noise.
If there are NC and NR values in the csv file, they will be stored in the
returned object with attributes ``nc`` and ``nr``. If the CSV file contains
time history, you can access to date and time with the ``time`` attribute.
Also, it is possible to know the integration time with the
``integration_time`` attribute.
:param filename: CSV file name.
:returns: Pandas dataframe with all data extracted from the CSV file.
:rtype: Pandas dataframe.
"""
with open(filename, "r") as csvfile:
csvreader = csvfile.read()
csvreader = re.sub(r" dB", "", csvreader) # Clean " dB" from data
dialect = csv.Sniffer().sniff(csvreader, delimiters=",;")
separator = dialect.delimiter
# Guess decimal separator
decimal_sep = re.search(r"\"\d{2,3}"
r"(\.|,)" # Decimal separator
r"\d{1,2}\"",
csvreader).group(1)
n_cols = re.search("(.+)\n", csvreader).group(1).count(separator) + 1
if n_cols < 5:
unsorted_data = []
pdindex = ["Z"]
for i, c in enumerate(csvreader.splitlines()):
if c[:4] == '"NR"':
nr = int(re.search(r"\d{2}", c).group(0))
continue
elif c[:4] == '"NC"':
nc = int(re.search(r"\d{2}", c).group(0))
continue
if i != 0:
unsorted_data.append(c.split(separator))
else:
if n_cols == 3:
pdindex.append(c[-2:-1])
elif n_cols == 4:
pdindex.append("A")
pdindex.append("C")
# Create a sorted temporary csv-like file
csv_data = list(zip(*unsorted_data))
temp_csv = ""
for row in csv_data:
temp_csv += separator.join(row) + "\n"
# Then, read it with pandas
data = pd.read_csv(io.StringIO(temp_csv), sep=separator,
decimal=decimal_sep)
# Assign NC and NR data if they are present
try:
data.nc = nc
data.nr = nr
except:
pass
# If the csv file contains global data from the "Details" tab in
# NoiseTools, skip row names
if n_cols != 2:
data.index = pdindex
else:
data = pd.read_csv(filename, parse_dates=[[0, 1]], sep=separator,
decimal=decimal_sep)
# Fix time name column
en_columns = data.columns.values
en_columns[0] = "time"
data.columns = en_columns
        # Guess the integration time with the statistical mode, because the CSV
        # could have been cleaned of unwanted noise
data["time"] = pd.to_datetime(data.time)
delta = data.time.diff().fillna(0)
int_time = int(delta.mode()) * 1e-9 # Mode and change from ns to s
if round(int_time, 2) == 0.06: # Fix for 1/16 s
int_time = 0.0625
data.integration_time = int_time
return data
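# Hypothetical usage sketch (the file name is an assumption, not shipped with the package):
#   df = read_csv_cirrus("noisetools_export.csv")
#   print(df.nc, df.nr)  # only available when the CSV contained NC/NR rows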
|
bsd-3-clause
|
feranick/SpectralMachine
|
Archive/SpectraLearnPredict2/SpectraLearnPredict2/slp/slp_keras.py
|
1
|
13951
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* SpectraLearnPredict2 - Keras
* Perform Machine Learning on Spectroscopy Data.
*
* Uses: Deep Neural Networks, TensorFlow, SVM, PCA, K-Means
*
* By: Nicola Ferralis <[email protected]>
*
***********************************************************
'''
import matplotlib
if matplotlib.get_backend() == 'TkAgg':
matplotlib.use('Agg')
import numpy as np
import sys, os.path, getopt, glob, csv, pydot, graphviz
import random, time, configparser, os, pickle
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
from .slp_config import *
#**********************************************
''' Format input data for Estimator '''
#**********************************************
def input_fn(A, Cl2):
import tensorflow as tf
x = tf.constant(A.astype(np.float32))
y = tf.constant(Cl2)
return x,y
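# Hedged usage sketch for input_fn with the (older) tf.contrib.learn-style estimators;
# `classifier`, `A`, and `Cl2` are assumed to be defined by the caller:
#   classifier.fit(input_fn=lambda: input_fn(A, Cl2), steps=kerasDef.trainingSteps)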
#********************************************************************************
''' Keras '''
''' https://keras.io/getting-started/sequential-model-guide/#examples'''
#********************************************************************************
def trainKeras(En, A, Cl, A_test, Cl_test, Root):
if Configuration().useTF2:
print(" Using tf.keras API")
import tensorflow.keras as keras #tf.keras
opts = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=1) # Tensorflow 2.0
conf = tf.compat.v1.ConfigProto(gpu_options=opts) # Tensorflow 2.0
#gpus = tf.config.experimental.list_physical_devices('GPU')
#if gpus:
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# if dP.setMaxMem:
# tf.config.experimental.set_virtual_device_configuration(
# gpus[0],
# [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=dP.maxMem)])
def_val_mae = 'val_mae'
def_acc = 'accuracy'
def_val_acc = 'val_accuracy'
else:
opts = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=1)
conf = tf.compat.v1.ConfigProto(gpu_options=opts)
conf.gpu_options.allow_growth = True
if dP.useTFKeras:
print(" Using tf.keras API")
import tensorflow.keras as keras #tf.keras
tf.compat.v1.Session(config=conf)
else:
print(" Using pure keras API")
import keras # pure keras
from keras.backend.tensorflow_backend import set_session
set_session(tf.compat.v1.Session(config=conf))
def_val_mae = 'val_mean_absolute_error'
def_acc = 'acc'
def_val_acc = 'val_acc'
from sklearn import preprocessing
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
tb_directory = "keras_" + str(len(kerasDef.hidden_layers))+"HL_"+str(kerasDef.hidden_layers[0])
model_directory = "."
if kerasDef.regressor:
model_name = model_directory+"/keras_regressor_"+str(len(kerasDef.hidden_layers))+"HL_"+str(kerasDef.hidden_layers[0])+".hd5"
else:
model_name = model_directory+"/keras_"+str(len(kerasDef.hidden_layers))+"HL_"+str(kerasDef.hidden_layers[0])+".hd5"
model_le = model_directory+"/keras_model_le.pkl"
if kerasDef.alwaysRetrain == False:
print(" Training model saved in: ", model_name, "\n")
else:
kerasDef.alwaysImprove = False
print(" Training model not saved\n")
#**********************************************
''' Initialize Estimator and training data '''
#**********************************************
print(' Preprocessing data and classes for Keras\n')
totA = np.vstack((A, A_test))
totCl = np.append(Cl, Cl_test)
if kerasDef.regressor:
Cl2 = np.copy(Cl)
Cl2_test = np.copy(Cl_test)
le = None
else:
numTotClasses = np.unique(totCl).size
le = preprocessing.LabelEncoder()
totCl2 = le.fit_transform(totCl)
Cl2 = le.transform(Cl)
Cl2_test = le.transform(Cl_test)
totCl2 = keras.utils.to_categorical(totCl2, num_classes=np.unique(totCl).size)
Cl2 = keras.utils.to_categorical(Cl2, num_classes=np.unique(totCl).size+1)
Cl2_test = keras.utils.to_categorical(Cl2_test, num_classes=np.unique(totCl).size+1)
print(" Label Encoder saved in:", model_le)
with open(model_le, 'ab') as f:
f.write(pickle.dumps(le))
if kerasDef.fullBatch:
batch_size = A.shape[0]
else:
batch_size = kerasDef.batchSize
printParamKeras(A)
if kerasDef.alwaysImprove == True or os.path.exists(model_name) is False:
model = keras.models.Sequential()
for numLayers in kerasDef.hidden_layers:
model.add(keras.layers.Dense(numLayers,
activation = kerasDef.activation_function,
input_dim=A.shape[1],
kernel_regularizer=keras.regularizers.l2(kerasDef.l2_reg_strength)))
model.add(keras.layers.Dropout(kerasDef.dropout_perc))
if kerasDef.regressor:
model.add(keras.layers.Dense(1))
model.compile(loss='mse',
optimizer=kerasDef.optimizer,
metrics=['mae'])
else:
model.add(keras.layers.Dense(np.unique(totCl).size+1, activation = 'softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=kerasDef.optimizer,
metrics=['accuracy'])
tbLog = keras.callbacks.TensorBoard(log_dir=tb_directory,
histogram_freq=kerasDef.tbHistogramFreq,
write_graph=True, write_images=True)
#tbLog.set_model(model)
tbLogs = [tbLog]
log = model.fit(A, Cl2,
epochs=kerasDef.trainingSteps,
batch_size=batch_size,
callbacks = tbLogs,
verbose = 2,
validation_data=(A_test, Cl2_test))
loss = np.asarray(log.history['loss'])
val_loss = np.asarray(log.history['val_loss'])
if kerasDef.regressor:
accuracy = None
val_acc = None
else:
accuracy = np.asarray(log.history[def_acc])
val_acc = np.asarray(log.history[def_val_acc])
if Configuration().useTF2:
model.save(dP.model_name, save_format='h5')
else:
model.save(dP.model_name)
if kerasDef.plotModel:
from keras.utils import plot_model
keras.utils.plot_model(model, to_file=model_directory+'/keras_MLP_model.png', show_shapes=True)
import matplotlib.pyplot as plt
plt.figure(tight_layout=True)
plotInd = int(len(kerasDef.hidden_layers))*100+11
visibleX = True
for layer in model.layers:
try:
w_layer = layer.get_weights()[0]
ax = plt.subplot(plotInd)
newX = np.arange(En[0], En[-1], (En[-1]-En[0])/w_layer.shape[0])
plt.plot(En, np.interp(En, newX, w_layer[:,0]), label=layer.get_config()['name'])
plt.legend(loc='upper right')
plt.setp(ax.get_xticklabels(), visible=visibleX)
visibleX = False
plotInd +=1
except:
pass
plt.xlabel('Raman shift [1/cm]')
plt.legend(loc='upper right')
plt.savefig('keras_weights_MLP' + '.png', dpi = 160, format = 'png') # Save plot
printModelKeras(model)
print("\n Number of spectra = ",A.shape[0])
print(" Number of points in each spectra = ", A.shape[1])
if kerasDef.regressor == False:
print(" Number unique classes (training): ", np.unique(Cl).size)
print(" Number unique classes (validation):", np.unique(Cl_test).size)
print(" Number unique classes (total): ", np.unique(totCl).size)
printParamKeras(A)
printTrainSummary(accuracy, loss, val_acc, val_loss)
else:
print(" Retreaving training model from: ", model_name,"\n")
model = keras.models.load_model(model_name)
printModelKeras(model)
printParamKeras(A)
score = model.evaluate(A_test, Cl2_test, batch_size=batch_size)
printEvalSummary(model_name, score)
return model, le
#***********************************************
''' Summary visualization panels '''
#***********************************************
def printTrainSummary(accuracy,loss, val_acc, val_loss):
if kerasDef.regressor:
print('\n ========================================================')
print(' \033[1mKeras MLP - Regressor\033[0m - Training Summary')
print(' ========================================================')
print(" \033[1mLoss\033[0m - Average: {0:.4f}; Min: {1:.4f}; Last: {2:.4f}".format(np.average(loss), np.amin(loss), loss[-1]))
print(' ========================================================\n')
else:
print('\n ================================================')
print(' \033[1mKeras MLP - Classifier \033[0m - Training Summary')
print(' ================================================')
print(" Accuracy - Average: {0:.2f}%; Max: {1:.2f}%".format(100*np.average(accuracy), 100*np.amax(accuracy)))
print(" Loss - Average: {0:.4f}; Min: {1:.4f}".format(np.average(loss), np.amin(loss)))
print("\n Validation ({0:.0f}%) - Average: {1:.2f}%; Max: {2:.2f}%".format(100*preprocDef.percentCrossValid,
100*np.average(val_acc), 100*np.amax(val_acc)))
print(" Loss - Average: {0:.4f}; Min: {1:.4f}".format(np.average(val_loss), np.amin(val_loss)))
print(' ================================================\n\n')
def printEvalSummary(model_name, score):
print('\n\n ================================================')
print(' \033[1mKeras MLP\033[0m - Evaluation Summary')
print(' ================================================')
print(" Evaluation ({0:.0f}%) - Loss: {1:.4f}".format(100*preprocDef.percentCrossValid,
score[0]))
if kerasDef.regressor == False:
print(" Accuracy: {0:.2f}%".format(100*score[1]))
print(" Global step: {:d}".format(kerasDef.trainingSteps))
if os.path.exists(model_name) is True:
print("\n Model saved in:", model_name)
print(' ================================================\n')
def printModelKeras(model):
print('\n ================================================')
print(' \033[1mKeras MLP\033[0m - Model Configuration')
print(' ================================================')
for conf in model.get_config():
print(conf,"\n")
model.summary()
def printParamKeras(A):
print('\n ================================================')
if kerasDef.regressor:
print(' \033[1mKeras MLP - Regressor \033[0m - Parameters')
else:
print(' \033[1mKeras MLP - Classifier \033[0m - Parameters')
print(' ================================================')
print(' Optimizer:',kerasDef.optimizer_tag,
'\n Hidden layers:', kerasDef.hidden_layers,
'\n Activation function:',kerasDef.activation_function,
'\n L2:',kerasDef.l2_reg_strength,
'\n Dropout:', kerasDef.dropout_perc,
'\n Learning rate:', kerasDef.learning_rate,
'\n Learning decay rate:', kerasDef.learning_decay_rate)
if kerasDef.fullBatch:
print(' Full batch size: {0:d} spectra, {1:.3f} Mb'.format(A.shape[0],(1e-6*A.size*A.itemsize)))
else:
print(' Batch size:', kerasDef.batchSize)
print(' ================================================\n')
#********************************************************************************
''' Predict using Keras model '''
#********************************************************************************
def predKeras(model, le, R, Cl):
if kerasDef.useTFKeras:
import tensorflow.keras as keras #tf.keras
else:
import keras # pure keras
from sklearn import preprocessing
if kerasDef.regressor:
predictions = model.predict(R).flatten()[0]
print(' ========================================================')
print(' \033[1mKeras MLP - Regressor\033[0m - Prediction')
print(' ========================================================')
predValue = predictions
print('\033[1m\n Predicted value = {0:.4f}\033[0m\n'.format(predValue))
print(' ========================================================\n')
predProb = 0
else:
predictions = model.predict(R, verbose=1)
pred_class = np.argmax(predictions)
if pred_class.size >0:
predValue = le.inverse_transform([pred_class])[0]
else:
predValue = 0
predProb = round(100*predictions[0][pred_class],2)
print('\n ==================================')
print(' \033[1mKeras\033[0m - Probability >',str(kerasDef.thresholdProbabilityPred),'%')
print(' ==================================')
print(' Prediction\tProbability [%]')
for i in range(len(predictions[0])-1):
if predictions[0][i]>kerasDef.thresholdProbabilityPred:
print(' ',str(np.unique(Cl)[i]),'\t\t',
str('{:.2f}'.format(100*predictions[0][i])))
print(' ==================================')
print('\033[1m' + '\n Predicted value (Keras) = ' + str(predValue) +
' (probability = ' + str(predProb) + '%)\033[0m\n')
return predValue, predProb
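#********************************************************************************
# Illustrative usage sketch (not part of the original script). Assuming a trained
# Keras `model`, a fitted sklearn LabelEncoder `le`, a single spectrum `R` of
# shape (1, n_features) and the training labels `Cl`, a prediction call would
# look like:
#
#   predValue, predProb = predKeras(model, le, R, Cl)
#   print("Predicted:", predValue, "- probability:", predProb, "%")
#********************************************************************************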
license: gpl-3.0

repo_name: fabianp/scikit-learn
path: sklearn/decomposition/base.py
copies: 313
size: 5647
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
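        # Woodbury identity applied to Sigma = C.T @ diag(d) @ C + sigma2 * I,
        # with C = components_ (rescaled above when whitened), d = exp_var_diff
        # and sigma2 = noise_variance_:
        #   Sigma^-1 = I/sigma2 - C.T @ inv(diag(1/d) + C @ C.T/sigma2) @ C / sigma2**2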
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
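# Illustrative sketch (not part of the upstream module): concrete estimators such
# as sklearn.decomposition.PCA derive from _BasePCA, so the methods above can be
# exercised roughly as follows (assuming a non-whitened fit):
#
#   import numpy as np
#   from sklearn.decomposition import PCA
#   X = np.random.RandomState(0).randn(100, 5)
#   pca = PCA(n_components=3).fit(X)
#   cov = pca.get_covariance()    # generative low-rank + noise estimate
#   prec = pca.get_precision()    # same quantity inverted via the Woodbury identity
#   assert np.allclose(prec, np.linalg.inv(cov))
#   X_rec = pca.inverse_transform(pca.transform(X))   # lossy reconstruction of X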
license: bsd-3-clause