repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
djgagne/scikit-learn
|
sklearn/utils/tests/test_fixes.py
|
281
|
1829
|
# Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
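# Editorial illustration (not part of the original tests): the naive formula
# 1. / (1. + np.exp(-x)) overflows inside np.exp for x around -1000, whereas
# expit evaluates the logistic function stably over the whole range.
assert_array_almost_equal(expit(np.array([-1000., 0., 1000.])),
                          [0., 0.5, 1.])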
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if the requested dtype matches
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
|
bsd-3-clause
|
oaelhara/numbbo
|
code-postprocessing/bbob_pproc/comp2/ppscatter.py
|
1
|
17703
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Scatter Plots.
For two algorithms, this generates the scatter plot of log(ERT1(df)) vs.
log(ERT0(df)), where ERT0(df) is the ERT of the reference algorithm,
ERT1(df) is the ERT of the algorithm of concern, both for target
precision df.
Different symbols are used for different dimensions (see
:py:data:`markers` for the order of the markers, :py:data:`colors` for
the corresponding colors).
The target precisions considered are in :py:data:`targets`: by
default 46 targets are uniformly spread on the log-scale in
10**[-8:2].
Boxes correspond to the maximum numbers of function evaluations for
each algorithm in each dimension.
"""
from __future__ import absolute_import
"""For two algorithms, ERTs(given target function value) can also be
plotted in a scatter plot (log(ERT0) vs. log(ERT1)), which results in a
very attractive presentation, see the slides of Frank Hutter at
http://www.msr-inria.inria.fr/events-news/first-search-biology-day. The
advantage is that the absolute values do not get lost. The disadvantage
(in our case minor) is that there is an upper limit on the amount of data
that can be displayed.
"""
import os
import numpy
import numpy as np
from pdb import set_trace
from matplotlib import pyplot as plt
try:
from matplotlib.transforms import blended_transform_factory as blend
except ImportError:
# compatibility matplotlib 0.8
from matplotlib.transforms import blend_xy_sep_transform as blend
from .. import genericsettings, htmldesc, ppfigparam
from ..ppfig import saveFigure, save_single_functions_html, AlgorithmCount
from .. import toolsdivers
from .. import pproc
dimensions = (2, 3, 5, 10, 20, 40)
fixed_targets = pproc.TargetValues(np.logspace(-8, 2, 46))
#runlength_based_targets = pproc.RunlengthBasedTargetValues(np.logspace(numpy.log10(0.5), numpy.log10(50), 8))
# runlength_based_targets = pproc.RunlengthBasedTargetValues([0.5, 1, 3, 10, 50])
targets = fixed_targets # default
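# Editorial illustration (not part of the original module): the default target
# grid described in the module docstring, 46 target precisions spread
# uniformly on a log scale over 10**[-8, 2]; this is exactly the array that
# fixed_targets wraps above.
_default_target_grid = np.logspace(-8, 2, 46)  # 1e-08, ..., 1e+02, 46 values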
# formattings
colors = ('c', 'g', 'b', 'k', 'r', 'm', 'k', 'y', 'k', 'c', 'r', 'm')
markers = ('+', 'v', '*', 'o', 's', 'D', 'x')
markersize = 14 # modified in config.py
markersize_addon_beyond_maxevals = -6
linewidth_default = 0 # lines look ugly and are not necessary (anymore), because smaller symbols are used beyond maxevals
linewidth_rld_based = 2 # show lines because only 8 symbols are used
max_evals_line_length = 9 # length away from the diagonal as a factor; the line indicates the maximal number of evaluations for each data set
offset = 0. #0.02 offset provides a way to move away the box boundaries to display the outer markers fully, clip_on=False is more effective
caption_start_fixed = r"""Expected running time (\ERT\ in $\log_{10}$ of number of function evaluations)
of \algorithmB\ ($x$-axis) versus \algorithmA\ ($y$-axis) for $NBTARGETS$ target values
$\Df \in [NBLOW, NBUP]$ in each dimension on functions #1. """
caption_start_rlbased = r"""Expected running time (\ERT\ in $\log_{10}$ of number of function evaluations)
of \algorithmA\ ($y$-axis) versus \algorithmB\ ($x$-axis) for $NBTARGETS$ runlength-based target
function values for budgets between $NBLOW$ and $NBUP$ evaluations.
Each runlength-based target $f$-value is chosen such that the \ERT{}s of the
REFERENCE_ALGORITHM artificial algorithm for the given and a slightly easier
target bracket the reference budget. """
caption_finish = r"""Markers on the upper or right edge indicate that the respective target
value was never reached. Markers represent dimension:
2:{\color{cyan}+},
3:{\color{green!45!black}$\triangledown$},
5:{\color{blue}$\star$},
10:$\circ$,
20:{\color{red}$\Box$},
40:{\color{magenta}$\Diamond$}. """
def figure_caption():
if isinstance(targets, pproc.RunlengthBasedTargetValues):
s = caption_start_rlbased
s = s.replace('NBTARGETS', str(len(targets)))
s = s.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)) +
r'\times\DIM' if targets.times_dimension else '')
s = s.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)) +
r'\times\DIM' if targets.times_dimension else '')
s = s.replace('REFERENCE_ALGORITHM', targets.reference_algorithm)
else:
s = caption_start_fixed
s = s.replace('NBTARGETS', str(len(targets)))
s = s.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)))
s = s.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)))
s += caption_finish
return s
def figure_caption_html():
if isinstance(targets, pproc.RunlengthBasedTargetValues):
s = htmldesc.getValue('##bbobppscatterlegendrlbased##')
s = s.replace('NBTARGETS', str(len(targets)))
s = s.replace('NBLOW', toolsdivers.number_to_html(targets.label(0)) +
r'\times\DIM' if targets.times_dimension else '')
s = s.replace('NBUP', toolsdivers.number_to_html(targets.label(-1)) +
r'\times\DIM' if targets.times_dimension else '')
s = s.replace('REFERENCEALGORITHM', targets.reference_algorithm)
else:
s = htmldesc.getValue('##bbobppscatterlegendfixed##')
s = s.replace('NBTARGETS', str(len(targets)))
s = s.replace('NBLOW', toolsdivers.number_to_html(targets.label(0)))
s = s.replace('NBUP', toolsdivers.number_to_html(targets.label(-1)))
s += htmldesc.getValue('##bbobppscatterlegendend##')
return s
def beautify():
a = plt.gca()
a.set_xscale('log')
a.set_yscale('log')
#a.set_xlabel('ERT0')
#a.set_ylabel('ERT1')
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
minbnd = min(xmin, ymin)
maxbnd = max(xmax, ymax)
maxbnd = maxbnd ** (1 + 11.*offset/(numpy.log10(float(maxbnd)/minbnd)))
plt.plot([minbnd, maxbnd], [minbnd, maxbnd], ls='-', color='k')
plt.plot([10*minbnd, 10*maxbnd], [minbnd, maxbnd], ls=':', color='k')
plt.plot([100*minbnd, 100*maxbnd], [minbnd, maxbnd], ls=':', color='k')
plt.plot([minbnd, maxbnd], [10*minbnd, 10*maxbnd], ls=':', color='k')
plt.plot([minbnd, maxbnd], [100*minbnd, 100*maxbnd], ls=':', color='k')
plt.xlim(minbnd, maxbnd)
plt.ylim(minbnd, maxbnd)
#a.set_aspect(1./a.get_data_ratio())
a.set_aspect('equal')
plt.grid(True)
tmp = a.get_yticks()
tmp2 = []
for i in tmp:
tmp2.append('%d' % round(numpy.log10(i)))
a.set_yticklabels(tmp2)
a.set_xticklabels(tmp2)
#for line in a.get_xticklines():# + a.get_yticklines():
# plt.setp(line, color='b', marker='o', markersize=10)
#set_trace()
def main(dsList0, dsList1, outputdir, verbose=True):
"""Generate a scatter plot figure.
TODO: """
#plt.rc("axes", labelsize=24, titlesize=24)
#plt.rc("xtick", labelsize=20)
#plt.rc("ytick", labelsize=20)
#plt.rc("font", size=20)
#plt.rc("legend", fontsize=20)
dictFunc0 = dsList0.dictByFunc()
dictFunc1 = dsList1.dictByFunc()
funcs = set(dictFunc0.keys()) & set(dictFunc1.keys())
if isinstance(targets, pproc.RunlengthBasedTargetValues):
linewidth = linewidth_rld_based
else:
linewidth = linewidth_default
funInfos = ppfigparam.read_fun_infos(dsList0.isBiobjective())
for f in funcs:
dictDim0 = dictFunc0[f].dictByDim()
dictDim1 = dictFunc1[f].dictByDim()
dims = set(dictDim0.keys()) & set(dictDim1.keys())
#set_trace()
for i, d in enumerate(dimensions):
try:
entry0 = dictDim0[d][0] # should be only one element
entry1 = dictDim1[d][0] # should be only one element
except (IndexError, KeyError):
continue
if linewidth: # plot all reliable ERT values as a line
all_targets = np.array(sorted(set(entry0.target).union(entry1.target), reverse=True))
assert entry0.detSuccessRates([all_targets[0]]) == 1.0
assert entry1.detSuccessRates([all_targets[0]]) == 1.0
all_targets = all_targets[np.where(all_targets <= targets((f, d))[0])[0]] #
xdata_all = np.array(entry0.detERT(all_targets))
ydata_all = np.array(entry1.detERT(all_targets))
# idx of reliable targets: last index where success rate >= 1/2 and ERT <= maxevals
idx = []
for ari in (np.where(entry0.detSuccessRates(all_targets) >= 0.5)[0],
np.where(entry1.detSuccessRates(all_targets) >= 0.5)[0],
np.where(xdata_all <= max(entry0.maxevals))[0],
np.where(ydata_all <= max(entry1.maxevals))[0]
):
if len(ari):
idx.append(ari[-1])
if len(idx) == 4:
max_idx = min(idx)
## at least up to the most difficult given target
## idx = max((idx, np.where(all_targets >= targets((f, d))[-1])[0][-1]))
xdata_all = xdata_all[:max_idx + 1]
ydata_all = ydata_all[:max_idx + 1]
idx = (numpy.isfinite(xdata_all)) * (numpy.isfinite(ydata_all))
assert idx.all()
if idx.any():
plt.plot(xdata_all[idx], ydata_all[idx], colors[i], ls='solid', lw=linewidth,
# TODO: ls has changed, check whether this works out
clip_on=False)
xdata = numpy.array(entry0.detERT(targets((f, d))))
ydata = numpy.array(entry1.detERT(targets((f, d))))
# plot "valid" data, those within maxevals
idx = np.logical_and(xdata < entry0.mMaxEvals(),
ydata < entry1.mMaxEvals())
# was:
# (numpy.isinf(xdata) == False) *
# (numpy.isinf(ydata) == False) *
# (xdata < entry0.mMaxEvals()) *
# (ydata < entry1.mMaxEvals()))
if idx.any():
try:
plt.plot(xdata[idx], ydata[idx], ls='',
markersize=markersize,
marker=markers[i], markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=3,
clip_on=False)
except KeyError:
plt.plot(xdata[idx], ydata[idx], ls='', markersize=markersize,
marker='x', markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=3,
clip_on=False)
#try:
# plt.scatter(xdata[idx], ydata[idx], s=10, marker=markers[i],
# facecolor='None', edgecolor=colors[i], linewidth=3)
#except ValueError:
# set_trace()
# plot beyond maxevals but finite data
idx = ((numpy.isinf(xdata) == False) *
(numpy.isinf(ydata) == False) *
np.logical_or(xdata >= entry0.mMaxEvals(),
ydata >= entry1.mMaxEvals()))
if idx.any():
try:
plt.plot(xdata[idx], ydata[idx], ls='',
markersize=markersize + markersize_addon_beyond_maxevals,
marker=markers[i], markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=1,
clip_on=False)
except KeyError:
plt.plot(xdata[idx], ydata[idx], ls='', markersize=markersize,
marker='x', markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=2,
clip_on=False)
#ax = plt.gca()
ax = plt.axes()
# plot data on the right edge
idx = numpy.isinf(xdata) * (numpy.isinf(ydata) == False)
if idx.any():
# This (seems to) transform inf to the figure limits!?
trans = blend(ax.transAxes, ax.transData)
#plt.scatter([1.]*numpy.sum(idx), ydata[idx], s=10, marker=markers[i],
# facecolor='None', edgecolor=colors[i], linewidth=3,
# transform=trans)
try:
plt.plot([1.]*numpy.sum(idx), ydata[idx],
markersize=markersize + markersize_addon_beyond_maxevals, ls='',
marker=markers[i], markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=1,
transform=trans, clip_on=False)
except KeyError:
plt.plot([1.]*numpy.sum(idx), ydata[idx],
markersize=markersize, ls='',
marker='x', markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=2,
transform=trans, clip_on=False)
#set_trace()
# plot data on the left edge
idx = (numpy.isinf(xdata)==False) * numpy.isinf(ydata)
if idx.any():
# This (seems to) transform inf to the figure limits!?
trans = blend(ax.transData, ax.transAxes)
# plt.scatter(xdata[idx], [1.-offset]*numpy.sum(idx), s=10, marker=markers[i],
# facecolor='None', edgecolor=colors[i], linewidth=3,
# transform=trans)
try:
plt.plot(xdata[idx], [1.-offset]*numpy.sum(idx),
markersize=markersize + markersize_addon_beyond_maxevals, ls='',
marker=markers[i], markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=1,
transform=trans, clip_on=False)
except KeyError:
plt.plot(xdata[idx], [1.-offset]*numpy.sum(idx),
markersize=markersize, ls='',
marker='x', markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=2,
transform=trans, clip_on=False)
# plot data in the top corner
idx = numpy.isinf(xdata) * numpy.isinf(ydata)
if idx.any():
# plt.scatter(xdata[idx], [1.-offset]*numpy.sum(idx), s=10, marker=markers[i],
# facecolor='None', edgecolor=colors[i], linewidth=3,
# transform=trans)
try:
plt.plot([1.-offset]*numpy.sum(idx), [1.-offset]*numpy.sum(idx),
markersize=markersize + markersize_addon_beyond_maxevals, ls='',
marker=markers[i], markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=1,
transform=ax.transAxes, clip_on=False)
except KeyError:
plt.plot([1.-offset]*numpy.sum(idx), [1.-offset]*numpy.sum(idx),
markersize=markersize, ls='',
marker='x', markerfacecolor='None',
markeredgecolor=colors[i], markeredgewidth=2,
transform=ax.transAxes, clip_on=False)
#set_trace()
beautify()
for i, d in enumerate(dimensions):
try:
entry0 = dictDim0[d][0] # should be only one element
entry1 = dictDim1[d][0] # should be only one element
except (IndexError, KeyError):
continue
minbnd, maxbnd = plt.xlim()
plt.plot((entry0.mMaxEvals(), entry0.mMaxEvals()),
# (minbnd, entry1.mMaxEvals()), ls='-', color=colors[i],
(max([minbnd, entry1.mMaxEvals()/max_evals_line_length]), entry1.mMaxEvals()), ls='-', color=colors[i],
zorder=-1)
plt.plot(# (minbnd, entry0.mMaxEvals()),
(max([minbnd, entry0.mMaxEvals()/max_evals_line_length]), entry0.mMaxEvals()),
(entry1.mMaxEvals(), entry1.mMaxEvals()), ls='-',
color=colors[i], zorder=-1)
plt.xlim(minbnd, maxbnd)
plt.ylim(minbnd, maxbnd)
#Set the boundaries again: they changed due to new plots.
#plt.axvline(entry0.mMaxEvals(), ls='--', color=colors[i])
#plt.axhline(entry1.mMaxEvals(), ls='--', color=colors[i])
if f in funInfos.keys():
plt.ylabel(funInfos[f])
filename = os.path.join(outputdir, 'ppscatter_f%03d' % f)
saveFigure(filename, verbose=verbose)
if f == 1:
algName1 = toolsdivers.str_to_latex(toolsdivers.strip_pathname1(entry1.algId))
algName0 = toolsdivers.str_to_latex(toolsdivers.strip_pathname1(entry0.algId))
save_single_functions_html(
os.path.join(outputdir, genericsettings.two_algorithm_file_name),
"%s vs %s" % (algName1, algName0),
algorithmCount = AlgorithmCount.TWO,
isBiobjective = dsList0.isBiobjective(),
functionGroups = dsList0.getFuncGroups())
plt.close()
#plt.rcdefaults()
|
bsd-3-clause
|
lin-credible/scikit-learn
|
benchmarks/bench_plot_omp_lars.py
|
266
|
4447
|
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`).
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
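# Editorial sketch (not part of the original benchmark): the smallest
# self-contained comparison of the two solvers timed above, on a much smaller,
# hypothetical problem size.
def _tiny_omp_vs_lars():
    y, X, _ = make_sparse_coded_signal(n_samples=1, n_components=64,
                                       n_features=32, n_nonzero_coefs=4,
                                       random_state=0)
    X = np.asfortranarray(X)
    # both solvers recover a 4-nonzero code for the same signal y over X
    coef_omp = orthogonal_mp(X, y, n_nonzero_coefs=4)
    alphas, active, coef_lars = lars_path(X, y, max_iter=4)
    return coef_omp, coef_lars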
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
|
bsd-3-clause
|
robin-lai/scikit-learn
|
examples/applications/svm_gui.py
|
287
|
11161
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point-and-click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
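# Editorial sketch (not part of the original GUI): how the observer pattern
# implemented by Model above is used. Any object with an ``update(event,
# model)`` method can register itself; the observer class below is
# hypothetical.
class _PrintingObserver(object):
    def update(self, event, model):
        print("model changed: %s (%d data points)" % (event, len(model.data)))
# Example usage:
#   m = Model()
#   m.add_observer(_PrintingObserver())
#   m.data.append((0.0, 0.0, 1))
#   m.changed("example_added")   # notifies _PrintingObserver.update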
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
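# Editorial sketch (not part of the original GUI): the estimator choice made
# in Controller.fit above, without the Tk plumbing. ``X`` and ``y`` are
# hypothetical arrays of 2D points and +1/-1 labels.
def _fit_like_controller(X, y, kernel="rbf", C=1.0, gamma=0.01, coef0=0.0,
                         degree=3):
    if len(np.unique(y)) == 1:
        # only one class present -> one-class SVM, as the module docstring says
        clf = svm.OneClassSVM(kernel=kernel, gamma=gamma, coef0=coef0,
                              degree=degree)
        clf.fit(X)
    else:
        clf = svm.SVC(kernel=kernel, C=C, gamma=gamma, coef0=coef0,
                      degree=degree)
        clf.fit(X, y)
    return clf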
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
|
bsd-3-clause
|
matthew-tucker/mne-python
|
examples/io/plot_epochs_to_data_frame.py
|
5
|
8820
|
"""
=================================
Export epochs to Pandas DataFrame
=================================
In this example the pandas exporter will be used to produce a DataFrame
object. After exploring some basic features a split-apply-combine
work flow will be conducted to examine the latencies of the response
maxima across epochs and conditions.
Note. Equivalent methods are available for raw and evoked data objects.
Short Pandas Primer
-------------------
Pandas Data Frames
~~~~~~~~~~~~~~~~~~
A data frame can be thought of as a combination of matrix, list and dict:
It knows about linear algebra and element-wise operations but is size mutable
and allows for labeled access to its data. In addition, the pandas data frame
class provides many useful methods for restructuring, reshaping and visualizing
data. As most methods return data frame instances, operations can be chained
with ease; this allows one to write efficient one-liners. Technically a DataFrame
can be seen as a high-level container for numpy arrays and hence switching
back and forth between numpy arrays and DataFrames is very easy.
Taken together, these features qualify data frames for interoperation with
databases and for interactive data exploration / analysis.
Additionally, pandas interfaces with the R statistical computing language that
covers a huge amount of statistical functionality.
Export Options
~~~~~~~~~~~~~~
The pandas exporter comes with a few options worth commenting on.
Pandas DataFrame objects use a so called hierarchical index. This can be
thought of as an array of unique tuples, in our case, representing the higher
dimensional MEG data in a 2D data table. The column names are the channel names
from the epoch object. The channels can be accessed like entries of a
dictionary:
df['MEG 2333']
Epochs and time slices can be accessed with the .ix method:
epochs_df.ix[(1, 2), 'MEG 2333']
However, it is also possible to include this index as regular categorical data
columns which yields a long table format typically used for repeated measure
designs. To take control of this feature, on export, you can specify which
of the three dimensions 'condition', 'epoch' and 'time' is passed to the Pandas
index using the index parameter. Note that this decision is reversible at
any time, as demonstrated below.
Similarly, for convenience, it is possible to scale the times, e.g. from
seconds to milliseconds.
Some Instance Methods
~~~~~~~~~~~~~~~~~~~~~
Most numpy methods and many ufuncs can be found as instance methods, e.g.
mean, median, var, std, mul, max, argmax, etc.
Below an incomplete listing of additional useful data frame instance methods:
apply : apply function to data.
Any kind of custom function can be applied to the data. In combination with
lambda this can be very useful.
describe : quickly generate summary stats
Very useful for exploring data.
groupby : generate subgroups and initialize a 'split-apply-combine' operation.
Creates a group object. Subsequently, methods like apply, agg, or transform
can be used to manipulate the underlying data separately but
simultaneously. Finally, reset_index can be used to combine the results
back into a data frame.
plot : wrapper around plt.plot
However it comes with some special options. For examples see below.
shape : shape attribute
gets the dimensions of the data frame.
values :
return underlying numpy array.
to_records :
export data as numpy record array.
to_dict :
export data as dict of arrays.
Reference
~~~~~~~~~
More information and additional introductory materials can be found at the
pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
"""
# Author: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.io import Raw
from mne.datasets import sample
print(__doc__)
# turn on interactive mode
plt.ion()
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = Raw(raw_fname)
# For simplicity we will only consider the first 10 epochs
events = mne.read_events(event_fname)[:10]
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(auditory_l=1, auditory_r=2, visual_l=3, visual_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, preload=True, reject=reject)
###############################################################################
# Export DataFrame
# The following parameters will make the channels and times plotting
# friendly. The info columns 'epoch' and 'time' will be used as hierarchical
# index whereas the condition is treated as categorical data. Note that
# this is optional. By passing None you could also print out all nesting
# factors in a long table style commonly used for analyzing repeated measure
# designs.
index, scale_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)
df = epochs.to_data_frame(picks=None, scalings=scalings, scale_time=scale_time,
index=index)
# Create MEG channel selector and drop EOG channel.
meg_chs = [c for c in df.columns if 'MEG' in c]
df.pop('EOG 061') # this works just like with a list.
###############################################################################
# Explore Pandas MultiIndex
# Pandas is using a MultiIndex or hierarchical index to handle higher
# dimensionality while at the same time representing data in a flat 2d manner.
print(df.index.names, df.index.levels)
# Inspecting the index object unveils that 'epoch', 'time' are used
# for subsetting data. We can take advantage of that by using the
# .ix attribute, where in this case the first position indexes the MultiIndex
# and the second the columns, that is, channels.
# Plot some channels across the first three epochs
xticks, sel = np.arange(3, 600, 120), meg_chs[:15]
df.ix[:3, sel].plot(xticks=xticks)
mne.viz.tight_layout()
# slice the time starting at t0 in epoch 2 and ending 500ms after
# the baseline in epoch 3. Note that the second part of the tuple
# represents time in milliseconds from stimulus onset.
df.ix[(1, 0):(3, 500), sel].plot(xticks=xticks)
mne.viz.tight_layout()
# Note: For convenience the index was converted from floating point values
# to integer values. To restore the original values you can e.g. say
# df['times'] = np.tile(epoch.times, len(epochs_times))
# We now reset the index of the DataFrame to expose some Pandas
# pivoting functionality. To simplify the groupby operation we
# drop the indices to treat epoch and time as categorical factors.
df = df.reset_index()
# The ensuing DataFrame then is split into subsets reflecting a crossing
# between condition and trial number. The idea is that we can broadcast
# operations into each cell simultaneously.
factors = ['condition', 'epoch']
sel = factors + ['MEG 1332', 'MEG 1342']
grouped = df[sel].groupby(factors)
# To make the plot labels more readable let's edit the values of 'condition'.
df.condition = df.condition.apply(lambda name: name + ' ')
# Now we compare the mean response of two channels across conditions.
grouped.mean().plot(kind='bar', stacked=True, title='Mean MEG Response',
color=['steelblue', 'orange'])
mne.viz.tight_layout()
# We can even accomplish more complicated tasks in a few lines calling
# apply method and passing a function. Assume we wanted to know the time
# slice of the maximum response for each condition.
max_latency = grouped[sel[2]].apply(lambda x: df.time[x.argmax()])
print(max_latency)
# Again, to make the plot labels more readable, edit the values of 'condition'.
df.condition = df.condition.apply(lambda name: name + ' ')
plt.figure()
max_latency.plot(kind='barh', title='Latency of Maximum Response',
color=['steelblue'])
mne.viz.tight_layout()
# Finally, we will again remove the index to create a proper data table that
# can be used with statistical packages like statsmodels or R.
final_df = max_latency.reset_index()
final_df.rename(columns={0: sel[2]}) # as the index is oblivious of names.
# The index is now written into regular columns so it can be used as factor.
print(final_df)
# To save as csv file, uncomment the next line.
# final_df.to_csv('my_epochs.csv')
# Note. Data Frames can be easily concatenated, e.g., across subjects.
# E.g. say:
#
# import pandas as pd
# group = pd.concat([df_1, df_2])
# group['subject'] = np.r_[np.ones(len(df_1)), np.ones(len(df_2)) + 1]
|
bsd-3-clause
|
abimannans/scikit-learn
|
examples/exercises/plot_iris_exercise.py
|
323
|
1602
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
|
bsd-3-clause
|
ZENGXH/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
134
|
7452
|
"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixel data for 500
handwritten digit pictures are randomly projected to spaces with various
larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
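# Editorial sanity check (not part of the original example): evaluate the bound
# quoted in the docstring for 500 samples and eps = 0.1. Both the explicit
# formula and the sklearn helper give roughly 5300 dimensions, in line with the
# "several thousand dimensions" remark above.
_jl_eps = 0.1
_jl_bound = 4 * np.log(500) / (_jl_eps ** 2 / 2 - _jl_eps ** 3 / 3)
print("JL bound for 500 samples, eps=0.1: %d (helper: %d)"
      % (int(_jl_bound), johnson_lindenstrauss_min_dim(500, eps=_jl_eps)))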
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
AlexGrig/GPy
|
GPy/models/gradient_checker.py
|
8
|
17212
|
# ## Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from ..core.model import Model
import itertools
import numpy
from ..core.parameterization import Param
np = numpy
from ..util.block_matrices import get_blocks, get_block_shapes, unblock, get_blocks_3d, get_block_shapes_3d
def get_shape(x):
if isinstance(x, numpy.ndarray):
return x.shape
return ()
def at_least_one_element(x):
if isinstance(x, (list, tuple)):
return x
return [x]
def flatten_if_needed(x):
return numpy.atleast_1d(x).flatten()
class GradientChecker(Model):
def __init__(self, f, df, x0, names=None, *args, **kwargs):
"""
:param f: Function to check gradient for
:param df: Gradient of function to check
:param x0:
Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).
Can be a list of arrays, if takes a list of arrays. This list will be passed
to f and df in the same order as given here.
If only one argument, make sure not to pass a list!!!
:type x0: [array-like] | array-like | float | int
:param names:
Names to print, when performing gradcheck. If a list was passed to x0
a list of names with the same length is expected.
:param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)
Examples:
---------
from GPy.models import GradientChecker
N, M, Q = 10, 5, 3
Sinusoid:
X = numpy.random.rand(N, Q)
grad = GradientChecker(numpy.sin,numpy.cos,X,'x')
grad.checkgrad(verbose=1)
Using GPy:
X, Z = numpy.random.randn(N,Q), numpy.random.randn(M,Q)
kern = GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True)
grad = GradientChecker(kern.K,
lambda x: 2*kern.dK_dX(numpy.ones((1,1)), x),
x0 = X.copy(),
names='X')
grad.checkgrad(verbose=1)
grad.randomize()
grad.checkgrad(verbose=1)
"""
Model.__init__(self, 'GradientChecker')
if isinstance(x0, (list, tuple)) and names is None:
self.shapes = [get_shape(xi) for xi in x0]
self.names = ['X{i}'.format(i=i) for i in range(len(x0))]
elif isinstance(x0, (list, tuple)) and names is not None:
self.shapes = [get_shape(xi) for xi in x0]
self.names = names
elif names is None:
self.names = ['X']
self.shapes = [get_shape(x0)]
else:
self.names = names
self.shapes = [get_shape(x0)]
for name, xi in zip(self.names, at_least_one_element(x0)):
self.__setattr__(name, Param(name, xi))
self.link_parameter(self.__getattribute__(name))
# self._param_names = []
# for name, shape in zip(self.names, self.shapes):
# self._param_names.extend(map(lambda nameshape: ('_'.join(nameshape)).strip('_'), itertools.izip(itertools.repeat(name), itertools.imap(lambda t: '_'.join(map(str, t)), itertools.product(*map(lambda xi: range(xi), shape))))))
self.args = args
self.kwargs = kwargs
self.f = f
self.df = df
def _get_x(self):
if len(self.names) > 1:
return [self.__getattribute__(name) for name in self.names] + list(self.args)
return [self.__getattribute__(self.names[0])] + list(self.args)
def log_likelihood(self):
return float(numpy.sum(self.f(*self._get_x(), **self.kwargs)))
def _log_likelihood_gradients(self):
return numpy.atleast_1d(self.df(*self._get_x(), **self.kwargs)).flatten()
#def _get_params(self):
#return numpy.atleast_1d(numpy.hstack(map(lambda name: flatten_if_needed(self.__getattribute__(name)), self.names)))
#def _set_params(self, x):
#current_index = 0
#for name, shape in zip(self.names, self.shapes):
#current_size = numpy.prod(shape)
#self.__setattr__(name, x[current_index:current_index + current_size].reshape(shape))
#current_index += current_size
#def _get_param_names(self):
#_param_names = []
#for name, shape in zip(self.names, self.shapes):
#_param_names.extend(map(lambda nameshape: ('_'.join(nameshape)).strip('_'), itertools.izip(itertools.repeat(name), itertools.imap(lambda t: '_'.join(map(str, t)), itertools.product(*map(lambda xi: range(xi), shape))))))
#return _param_names
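# Editorial sketch (not part of GPy): the numerical side of the check performed
# by the classes in this module. checkgrad compares analytic gradients against
# central finite differences and passes when their ratio is within the given
# tolerance of one; ``f`` and ``x`` are hypothetical stand-ins for the checked
# function and a flat parameter vector.
def _central_difference_gradient(f, x, step=1e-6):
    g = numpy.zeros(x.size)
    for i in range(x.size):
        e = numpy.zeros(x.size)
        e[i] = step
        g[i] = (f(x + e) - f(x - e)) / (2. * step)
    return g
# A gradient df is then accepted when df(x) / _central_difference_gradient(f, x)
# lies within `tolerance` of unity (compare checkgrad_block below).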
class HessianChecker(GradientChecker):
def __init__(self, f, df, ddf, x0, names=None, *args, **kwargs):
"""
:param f: Function (only used for numerical hessian gradient)
:param df: Gradient of function to check
:param ddf: Analytical gradient function
:param x0:
Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).
Can be a list of arrays, if f takes a list of arrays. This list will be passed
to f and df in the same order as given here.
If only one argument, make sure not to pass a list!!!
:type x0: [array-like] | array-like | float | int
:param names:
Names to print, when performing gradcheck. If a list was passed to x0
a list of names with the same length is expected.
:param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)
"""
super(HessianChecker, self).__init__(df, ddf, x0, names=names, *args, **kwargs)
self._f = f
self._df = df
self._ddf = ddf
def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False):
"""
Overwrite checkgrad method to check whole block instead of looping through
Shows diagnostics using matshow instead
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:-
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
"""
try:
import numdifftools as nd
except:
raise ImportError("Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests")
if target_param:
raise NotImplementedError('Only basic functionality is provided with this gradchecker')
#Repeat for each parameter, not the nicest but shouldn't be many cases where there are many
#variables
current_index = 0
for name, shape in zip(self.names, self.shapes):
current_size = numpy.prod(shape)
x = self.optimizer_array.copy()
#x = self._get_params_transformed().copy()
x = x[current_index:current_index + current_size].reshape(shape)
# Check gradients
analytic_hess = self._ddf(x)
if analytic_hess.shape[1] == 1:
analytic_hess = numpy.diagflat(analytic_hess)
#From the docs:
#x0 : vector location
#at which to differentiate fun
#If x0 is an N x M array, then fun is assumed to be a function
#of N*M variables., thus we must have it flat, not (N,1), but just (N,)
#numeric_hess_partial = nd.Hessian(self._f, vectorized=False)
numeric_hess_partial = nd.Jacobian(self._df, vectorized=False)
#numeric_hess_partial = nd.Derivative(self._df, vectorized=True)
numeric_hess = numeric_hess_partial(x)
check_passed = self.checkgrad_block(analytic_hess, numeric_hess, verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=plot)
current_index += current_size
return check_passed
def checkgrad_block(self, analytic_hess, numeric_hess, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False):
"""
Checkgrad a block matrix
"""
if analytic_hess.dtype is np.dtype('object'):
#Make numeric hessian also into a block matrix
real_size = get_block_shapes(analytic_hess)
num_elements = np.sum(real_size)
if (num_elements, num_elements) == numeric_hess.shape:
#If the sizes are the same we assume they are the same
#(we have not fixed any values so the numeric is the whole hessian)
numeric_hess = get_blocks(numeric_hess, real_size)
else:
#Make a fake empty matrix and fill out the correct block
tmp_numeric_hess = get_blocks(np.zeros((num_elements, num_elements)), real_size)
tmp_numeric_hess[block_indices] = numeric_hess.copy()
numeric_hess = tmp_numeric_hess
if block_indices is not None:
#Extract the right block
analytic_hess = analytic_hess[block_indices]
numeric_hess = numeric_hess[block_indices]
else:
#Unblock them if they are in blocks and you aren't checking a single block (checking whole hessian)
if analytic_hess.dtype is np.dtype('object'):
analytic_hess = unblock(analytic_hess)
numeric_hess = unblock(numeric_hess)
ratio = numeric_hess / (numpy.where(analytic_hess==0, 1e-10, analytic_hess))
difference = numpy.abs(analytic_hess - numeric_hess)
check_passed = numpy.all((numpy.abs(1 - ratio)) < tolerance) or numpy.allclose(numeric_hess, analytic_hess, atol = tolerance)
if verbose:
if block_indices:
print("\nBlock {}".format(block_indices))
else:
print("\nAll blocks")
header = ['Checked', 'Max-Ratio', 'Min-Ratio', 'Min-Difference', 'Max-Difference']
header_string = map(lambda x: ' | '.join(header), [header])
separator = '-' * len(header_string[0])
print('\n'.join([header_string[0], separator]))
min_r = '%.6f' % float(numpy.min(ratio))
max_r = '%.6f' % float(numpy.max(ratio))
max_d = '%.6f' % float(numpy.max(difference))
min_d = '%.6f' % float(numpy.min(difference))
cols = [max_r, min_r, min_d, max_d]
if check_passed:
checked = "\033[92m True \033[0m"
else:
checked = "\033[91m False \033[0m"
grad_string = "{} | {} | {} | {} | {} ".format(checked, cols[0], cols[1], cols[2], cols[3])
print(grad_string)
if plot:
from matplotlib import pyplot as pb
fig, axes = pb.subplots(2, 2)
max_lim = numpy.max(numpy.vstack((analytic_hess, numeric_hess)))
min_lim = numpy.min(numpy.vstack((analytic_hess, numeric_hess)))
msa = axes[0,0].matshow(analytic_hess, vmin=min_lim, vmax=max_lim)
axes[0,0].set_title('Analytic hessian')
axes[0,0].xaxis.set_ticklabels([None])
axes[0,0].yaxis.set_ticklabels([None])
axes[0,0].xaxis.set_ticks([None])
axes[0,0].yaxis.set_ticks([None])
msn = axes[0,1].matshow(numeric_hess, vmin=min_lim, vmax=max_lim)
pb.colorbar(msn, ax=axes[0,1])
axes[0,1].set_title('Numeric hessian')
axes[0,1].xaxis.set_ticklabels([None])
axes[0,1].yaxis.set_ticklabels([None])
axes[0,1].xaxis.set_ticks([None])
axes[0,1].yaxis.set_ticks([None])
msr = axes[1,0].matshow(ratio)
pb.colorbar(msr, ax=axes[1,0])
axes[1,0].set_title('Ratio')
axes[1,0].xaxis.set_ticklabels([None])
axes[1,0].yaxis.set_ticklabels([None])
axes[1,0].xaxis.set_ticks([None])
axes[1,0].yaxis.set_ticks([None])
msd = axes[1,1].matshow(difference)
pb.colorbar(msd, ax=axes[1,1])
axes[1,1].set_title('difference')
axes[1,1].xaxis.set_ticklabels([None])
axes[1,1].yaxis.set_ticklabels([None])
axes[1,1].xaxis.set_ticks([None])
axes[1,1].yaxis.set_ticks([None])
if block_indices:
fig.suptitle("Block: {}".format(block_indices))
pb.show()
return check_passed
class SkewChecker(HessianChecker):
def __init__(self, df, ddf, dddf, x0, names=None, *args, **kwargs):
"""
:param df: gradient of function
:param ddf: Gradient of function to check (hessian)
:param dddf: Analytical gradient function (third derivative)
:param x0:
Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).
Can be a list of arrays, if f takes a list of arrays. This list will be passed
to f and df in the same order as given here.
If only one argument, make sure not to pass a list!!!
:type x0: [array-like] | array-like | float | int
:param names:
Names to print, when performing gradcheck. If a list was passed to x0
a list of names with the same length is expected.
:param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)
"""
super(SkewChecker, self).__init__(df, ddf, dddf, x0, names=names, *args, **kwargs)
def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False, super_plot=False):
"""
Gradient checker that just checks each hessian individually
super_plot will plot the hessian wrt every parameter, plot will just do the first one
"""
try:
import numdifftools as nd
except:
raise ImportError("Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests")
if target_param:
raise NotImplementedError('Only basic functionality is provided with this gradchecker')
#Repeat for each parameter, not the nicest but shouldn't be many cases where there are many
#variables
current_index = 0
for name, n_shape in zip(self.names, self.shapes):
current_size = numpy.prod(n_shape)
x = self.optimizer_array.copy()
#x = self._get_params_transformed().copy()
x = x[current_index:current_index + current_size].reshape(n_shape)
# Check gradients
#Actually the third derivative
analytic_hess = self._ddf(x)
#Can only calculate jacobian for one variable at a time
#From the docs:
#x0 : vector location
#at which to differentiate fun
#If x0 is an N x M array, then fun is assumed to be a function
#of N*M variables., thus we must have it flat, not (N,1), but just (N,)
#numeric_hess_partial = nd.Hessian(self._f, vectorized=False)
#Actually _df is already the hessian
numeric_hess_partial = nd.Jacobian(self._df, vectorized=True)
numeric_hess = numeric_hess_partial(x)
print("Done making numerical hessian")
if analytic_hess.dtype is np.dtype('object'):
                #Blockify numeric_hess as well
blocksizes, pagesizes = get_block_shapes_3d(analytic_hess)
#HACK
real_block_size = np.sum(blocksizes)
numeric_hess = numeric_hess.reshape(real_block_size, real_block_size, pagesizes)
#numeric_hess = get_blocks_3d(numeric_hess, blocksizes)#, pagesizes)
else:
numeric_hess = numeric_hess.reshape(*analytic_hess.shape)
#Check every block individually (for ease)
check_passed = [False]*numeric_hess.shape[2]
for block_ind in xrange(numeric_hess.shape[2]):
#Unless super_plot is set, just plot the first one
p = True if (plot and block_ind == numeric_hess.shape[2]-1) or super_plot else False
if verbose:
print("Checking derivative of hessian wrt parameter number {}".format(block_ind))
check_passed[block_ind] = self.checkgrad_block(analytic_hess[:,:,block_ind], numeric_hess[:,:,block_ind], verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=p)
current_index += current_size
return np.all(check_passed)
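# Hedged standalone sketch (not part of GPy): the same "Jacobian of the gradient
# equals the Hessian" idea used above, checked on a toy quadratic with
# numdifftools. The matrix A and the point x0 below are made up for illustration.
if __name__ == '__main__':
    import numpy as np
    import numdifftools as nd
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    df = lambda x: A.dot(x)                      # analytic gradient of 0.5*x'Ax
    x0 = np.array([0.3, -1.2])                   # flat (N,) input, as required above
    numeric_hess = nd.Jacobian(df)(x0)           # numerical Hessian
    print(np.allclose(numeric_hess, A, atol=1e-6))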
|
bsd-3-clause
|
detrout/debian-statsmodels
|
statsmodels/iolib/summary2.py
|
8
|
19601
|
from statsmodels.compat.python import (lrange, iterkeys, iteritems, lzip,
reduce, itervalues, zip, string_types,
range)
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
import datetime
import textwrap
from .table import SimpleTable
from .tableformatting import fmt_latex, fmt_txt
class Summary(object):
def __init__(self):
self.tables = []
self.settings = []
self.extra_txt = []
self.title = None
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_df(self, df, index=True, header=True, float_format='%.4f',
align='r'):
'''Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header: bool
Reproduce the DataFrame column labels in summary table
index: bool
Reproduce the DataFrame row labels in summary table
float_format: string
Formatting to float data columns
align : string
Data alignment (l/c/r)
'''
settings = {'index': index, 'header': header,
'float_format': float_format, 'align': align}
self.tables.append(df)
self.settings.append(settings)
def add_array(self, array, align='r', float_format="%.4f"):
'''Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format: string
Formatting to array if type is float
align : string
Data alignment (l/c/r)
'''
table = pd.DataFrame(array)
self.add_df(table, index=False, header=False,
float_format=float_format, align=align)
def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
'''Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols: int
Number of columns of the output table
align : string
Data alignment (l/c/r)
'''
keys = [_formatter(x, float_format) for x in iterkeys(d)]
vals = [_formatter(x, float_format) for x in itervalues(d)]
data = np.array(lzip(keys, vals))
if data.shape[0] % ncols != 0:
pad = ncols - (data.shape[0] % ncols)
data = np.vstack([data, np.array(pad * [['', '']])])
data = np.split(data, ncols)
data = reduce(lambda x, y: np.hstack([x, y]), data)
self.add_array(data, align=align)
def add_text(self, string):
'''Append a note to the bottom of the summary table. In ASCII tables,
        the note will be wrapped to table width. Notes are not indented.
'''
self.extra_txt.append(string)
def add_title(self, title=None, results=None):
'''Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically.
'''
if isinstance(title, string_types):
self.title = title
else:
try:
model = results.model.__class__.__name__
if model in _model_types:
model = _model_types[model]
self.title = 'Results: ' + model
except:
self.title = ''
def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
xname=None, yname=None):
'''Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
        float_format : string
            Float formatting for the summary of parameters (optional)
title : string
Title of the summary table (optional)
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
'''
param = summary_params(results, alpha=alpha, use_t=results.use_t)
info = summary_model(results)
if xname is not None:
param.index = xname
if yname is not None:
info['Dependent Variable:'] = yname
self.add_dict(info, align='l')
self.add_df(param, float_format=float_format)
self.add_title(title=title, results=results)
def as_text(self):
'''Generate ASCII Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
extra_txt = self.extra_txt
pad_col, pad_index, widest = _measure_tables(tables, settings)
rule_equal = widest * '='
#TODO: this isn't used anywhere?
rule_dash = widest * '-'
simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
tab = [x.as_text() for x in simple_tables]
tab = '\n'.join(tab)
tab = tab.split('\n')
tab[0] = rule_equal
tab.append(rule_equal)
tab = '\n'.join(tab)
if title is not None:
title = title
if len(title) < widest:
title = ' ' * int(widest/2 - len(title)/2) + title
else:
title = ''
txt = [textwrap.wrap(x, widest) for x in extra_txt]
txt = ['\n'.join(x) for x in txt]
txt = '\n'.join(txt)
out = '\n'.join([title, tab, txt])
return out
def as_html(self):
'''Generate HTML Summary Table
'''
tables = self.tables
settings = self.settings
#TODO: this isn't used anywhere
title = self.title
simple_tables = _simple_tables(tables, settings)
tab = [x.as_html() for x in simple_tables]
tab = '\n'.join(tab)
return tab
def as_latex(self):
'''Generate LaTeX Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
if title is not None:
title = '\\caption{' + title + '} \\\\'
else:
title = '\\caption{}'
simple_tables = _simple_tables(tables, settings)
tab = [x.as_latex_tabular() for x in simple_tables]
tab = '\n\\hline\n'.join(tab)
out = '\\begin{table}', title, tab, '\\end{table}'
out = '\n'.join(out)
return out
def _measure_tables(tables, settings):
'''Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest.
'''
simple_tables = _simple_tables(tables, settings)
tab = [x.as_text() for x in simple_tables]
length = [len(x.splitlines()[0]) for x in tab]
len_max = max(length)
pad_sep = []
pad_index = []
for i in range(len(tab)):
nsep = tables[i].shape[1] - 1
pad = int((len_max - length[i]) / nsep)
pad_sep.append(pad)
len_new = length[i] + nsep * pad
pad_index.append(len_max - len_new)
return pad_sep, pad_index, max(length)
# Useful stuff
_model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'NBin': 'Negative binomial model',
'GLM' : 'Generalized linear model'
}
def summary_model(results):
'''Create a dict with information about the model
'''
def time_now(*args, **kwds):
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d %H:%M')
info = OrderedDict()
info['Model:'] = lambda x: x.model.__class__.__name__
    info['Model Family:'] = lambda x: x.family.__class__.__name__
info['Link Function:'] = lambda x: x.family.link.__class__.__name__
info['Dependent Variable:'] = lambda x: x.model.endog_names
info['Date:'] = time_now
info['No. Observations:'] = lambda x: "%#6d" % x.nobs
info['Df Model:'] = lambda x: "%#6d" % x.df_model
info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
info['Converged:'] = lambda x: x.mle_retvals['converged']
info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
info['Method:'] = lambda x: x.method
info['Norm:'] = lambda x: x.fit_options['norm']
info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
info['Cov. Type:'] = lambda x: x.fit_options['cov']
info['R-squared:'] = lambda x: "%#8.3f" % x.rsquared
info['Adj. R-squared:'] = lambda x: "%#8.3f" % x.rsquared_adj
info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
info['AIC:'] = lambda x: "%8.4f" % x.aic
info['BIC:'] = lambda x: "%8.4f" % x.bic
info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
info['Scale:'] = lambda x: "%#8.5g" % x.scale
out = OrderedDict()
for key, func in iteritems(info):
try:
out[key] = func(results)
# NOTE: some models don't have loglike defined (RLM), so that's NIE
except (AttributeError, KeyError, NotImplementedError):
pass
return out
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, float_format="%.4f"):
'''create a summary table of parameters from results instance
Parameters
----------
    results : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : string
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
'''
if isinstance(results, tuple):
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if not xname:
data.index = results.model.exog_names
else:
data.index = xname
return data
# Vertical summary instance for multiple models
def _col_params(result, float_format='%.4f', stars=True):
'''Stack coefficients and standard errors in single column
'''
# Extract parameters
res = summary_params(result)
# Format float
for col in res.columns[:2]:
res[col] = res[col].apply(lambda x: float_format % x)
# Std.Errors in parentheses
res.ix[:, 1] = '(' + res.ix[:, 1] + ')'
# Significance stars
if stars:
idx = res.ix[:, 3] < .1
res.ix[:, 0][idx] = res.ix[:, 0][idx] + '*'
idx = res.ix[:, 3] < .05
res.ix[:, 0][idx] = res.ix[:, 0][idx] + '*'
idx = res.ix[:, 3] < .01
res.ix[:, 0][idx] = res.ix[:, 0][idx] + '*'
# Stack Coefs and Std.Errors
res = res.ix[:, :2]
res = res.stack()
res = pd.DataFrame(res)
res.columns = [str(result.model.endog_names)]
return res
def _col_info(result, info_dict=None):
'''Stack model info in a column
'''
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out
def _make_unique(list_of_names):
if len(set(list_of_names)) == len(list_of_names):
return list_of_names
# pandas does not like it if multiple columns have the same names
from collections import defaultdict
name_counter = defaultdict(str)
header = []
for _name in list_of_names:
name_counter[_name] += "I"
header.append(_name+" " + name_counter[_name])
return header
def summary_col(results, float_format='%.4f', model_names=[], stars=False,
info_dict=None, regressor_order=[]):
"""
Summarize multiple results instances side-by-side (coefs and SEs)
Parameters
----------
results : statsmodels results instance or list of result instances
float_format : string
float format for coefficients and standard errors
Default : '%.4f'
model_names : list of strings of length len(results) if the names are not
unique, a roman number will be appended to all model names
stars : bool
print significance stars
info_dict : dict
dict of lambda functions to be applied to results instances to retrieve
model info. To use specific information for different models, add a
(nested) info_dict with model name as the key.
Example: `info_dict = {"N":..., "R2": ..., "OLS":{"R2":...}}` would
only show `R2` for OLS regression models, but additionally `N` for
all other results.
Default : None (use the info_dict specified in
result.default_model_infos, if this property exists)
regressor_order : list of strings
list of names of the regressors in the desired order. All regressors
not specified will be appended to the end of the list.
"""
if not isinstance(results, list):
results = [results]
cols = [_col_params(x, stars=stars, float_format=float_format) for x in
results]
# Unique column names (pandas has problems merging otherwise)
if model_names:
colnames = _make_unique(model_names)
else:
colnames = _make_unique([x.columns[0] for x in cols])
for i in range(len(cols)):
cols[i].columns = [colnames[i]]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
summ = reduce(merg, cols)
if regressor_order:
varnames = summ.index.get_level_values(0).tolist()
ordered = [x for x in regressor_order if x in varnames]
unordered = [x for x in varnames if x not in regressor_order + ['']]
order = ordered + list(np.unique(unordered))
f = lambda idx: sum([[x + 'coef', x + 'stde'] for x in idx], [])
summ.index = f(np.unique(varnames))
summ = summ.reindex(f(order))
summ.index = [x[:-4] for x in summ.index]
idx = pd.Series(lrange(summ.shape[0])) % 2 == 1
summ.index = np.where(idx, '', summ.index.get_level_values(0))
# add infos about the models.
if info_dict:
cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,
info_dict)) for x in results]
else:
cols = [_col_info(x, getattr(x, "default_model_infos", None)) for x in
results]
# use unique column names, otherwise the merge will not succeed
    for df, name in zip(cols, _make_unique([df.columns[0] for df in cols])):
df.columns = [name]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
info = reduce(merg, cols)
dat = pd.DataFrame(np.vstack([summ, info])) # pd.concat better, but error
dat.columns = summ.columns
dat.index = pd.Index(summ.index.tolist() + info.index.tolist())
summ = dat
summ = summ.fillna('')
smry = Summary()
smry.add_df(summ, header=True, align='l')
smry.add_text('Standard errors in parentheses.')
if stars:
smry.add_text('* p<.1, ** p<.05, ***p<.01')
return smry
def _formatter(element, float_format='%.4f'):
try:
out = float_format % element
except:
out = str(element)
return out.strip()
def _df_to_simpletable(df, align='r', float_format="%.4f", header=True,
index=True, table_dec_above='-', table_dec_below=None,
header_dec_below='-', pad_col=0, pad_index=0):
dat = df.copy()
dat = dat.applymap(lambda x: _formatter(x, float_format))
if header:
headers = [str(x) for x in dat.columns.tolist()]
else:
headers = None
if index:
stubs = [str(x) + int(pad_index) * ' ' for x in dat.index.tolist()]
else:
dat.ix[:, 0] = [str(x) + int(pad_index) * ' ' for x in dat.ix[:, 0]]
stubs = None
st = SimpleTable(np.array(dat), headers=headers, stubs=stubs,
ltx_fmt=fmt_latex, txt_fmt=fmt_txt)
st.output_formats['latex']['data_aligns'] = align
st.output_formats['txt']['data_aligns'] = align
st.output_formats['txt']['table_dec_above'] = table_dec_above
st.output_formats['txt']['table_dec_below'] = table_dec_below
st.output_formats['txt']['header_dec_below'] = header_dec_below
st.output_formats['txt']['colsep'] = ' ' * int(pad_col + 1)
return st
def _simple_tables(tables, settings, pad_col=None, pad_index=None):
simple_tables = []
float_format = '%.4f'
if pad_col is None:
pad_col = [0] * len(tables)
if pad_index is None:
pad_index = [0] * len(tables)
for i, v in enumerate(tables):
index = settings[i]['index']
header = settings[i]['header']
align = settings[i]['align']
simple_tables.append(_df_to_simpletable(v, align=align,
float_format=float_format,
header=header, index=index,
pad_col=pad_col[i],
pad_index=pad_index[i]))
return simple_tables
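# Hedged usage sketch (not from the statsmodels docs): build a small Summary by
# hand and render it as text. The labels and numbers below are made up purely
# for illustration, and this assumes a pandas version contemporary with this
# module (the table code above relies on the old .ix indexer).
if __name__ == '__main__':
    example = Summary()
    example.add_title('Results: Example')
    example.add_dict({'Model:': 'OLS', 'No. Observations:': '100'}, align='l')
    example.add_df(pd.DataFrame({'Coef.': [1.25, -0.50], 'Std.Err.': [0.10, 0.20]},
                                index=['x1', 'x2']))
    example.add_text('Illustrative numbers only.')
    print(example.as_text())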
|
bsd-3-clause
|
Hiyorimi/scikit-image
|
doc/examples/features_detection/plot_brief.py
|
32
|
1879
|
"""
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm.
The descriptor consists of relatively few bits and can be computed using
a set of intensity difference tests. The short binary descriptor results
in low memory footprint and very efficient matching based on the Hamming
distance metric.
BRIEF does not provide rotation-invariance. Scale-invariance can be achieved by
detecting and extracting features at different scales.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors
extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors
extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
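# Hedged add-on (not part of the original scikit-image example): BRIEF
# descriptors are boolean bit vectors, so the Hamming distance used by the
# matcher can also be computed directly with NumPy.
import numpy as np
hamming12 = np.sum(descriptors1[matches12[:, 0]] != descriptors2[matches12[:, 1]], axis=1)
print("mean Hamming distance over img1-img2 matches:", hamming12.mean())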
|
bsd-3-clause
|
adrinjalali/Network-Classifier
|
Raccoon/core/raccoon.py
|
1
|
8312
|
import numpy as np
import scipy.stats.stats
import sklearn.svm
import sklearn.base
import sklearn.preprocessing
import sklearn.grid_search
from sklearn import cross_validation as cv
from common import FCE
class Raccoon:
"""
FCE_type in: {"RidgeBasedFCE", "PredictBasedFCE"}
"""
def __init__(self, verbose=0, logger=None, n_jobs=1, dynamic_features=True,
FCE_type = 'RidgeBasedFCE'):
self.features = None
self.FCEs = dict()
self.verbose = verbose
self.n_jobs = n_jobs
self.Xtrain = None
self.ytrain = None
self.normalizer = None
self.dynamic_features = dynamic_features
if logger is None:
self.logger = print
else:
self.logger = logger
self.FCE_type = FCE_type
def fit(self, X, y):
#normalizer = sklearn.preprocessing.Normalizer().fit(X)
#self.normalizer = normalizer
#X = self.normalizer.transform(X)
self.Xtrain = X
self.ytrain = y
if self.verbose > 0:
self.logger("Selecting candidate features for feature pool")
# calculate pearson correlation and respective p-values
# select the ones with a p-value < 0.05
inner_cv = cv.KFold(len(y), n_folds=5)
i = 0
features = set(np.arange(X.shape[1]))
for train, test in inner_cv:
inner_xtrain = X[train, :]
inner_ytrain = y[train]
tmp = [np.abs(scipy.stats.stats.pearsonr(inner_ytrain, inner_xtrain[:, i]))
for i in range(inner_xtrain.shape[1])]
tmp = np.array(tmp)
threshold_5000 = np.sort(tmp[:, 1])[5000]
threshold = min(threshold_5000, 0.05)
if self.verbose > 0:
self.logger("threshold 5000, threshold: %g %g" % (threshold_5000, threshold))
features = features.intersection(set(np.arange(tmp.shape[0])[abs(tmp[:, 1]) < threshold]))
if self.verbose > 0:
self.logger("new features length: %d" % len(features))
self.features = np.array(list(features))
#self.features = self.features[0:3] #debuging
if self.verbose > 0:
self.logger("%d features selected. Fitting feature confidence estimators" % (len(self.features)))
if self.dynamic_features == False:
self.logger("Done.")
return
self.FCEs = dict()
i = 0
if self.FCE_type == 'RidgeBasedFCE':
for f in self.features:
i += 1
if self.verbose > 0:
self.logger("%d / %d fitting FCE for feature %d" % (i, self.features.shape[0], f))
fce = FCE.RidgeBasedFCE(self.logger, n_jobs=self.n_jobs,
verbose=self.verbose)
fce.fit(X, f)
self.FCEs[f] = fce
elif self.FCE_type == 'PredictBasedFCE':
for f in self.features:
i += 1
if self.verbose > 0:
self.logger("%d / %d fitting RDCs for feature %d" % (i, self.features.shape[0], f))
fce = FCE.PredictBasedFCE(feature_count=10, n_jobs=self.n_jobs,
logger=self.logger,
verbose=self.verbose)
fce.fit(X, f, fit_rdcs=True, fit_gp=False)
self.FCEs[f] = fce
i = 0
for f, fce in self.FCEs.items():
i += 1
if self.verbose > 0:
self.logger("%d / %d fitting FCE for feature %d" % (i, self.features.shape[0], f))
fce.fit(X, f, fit_rdcs=False, fit_gp=True)
else:
raise Exception("FCE_type unknown")
if self.verbose > 0:
self.logger("Done.")
def predict(self, X, model=sklearn.svm.SVC(), param_dist=None):
X = X.view(np.ndarray)
if X.ndim == 1:
X = X.reshape(1, -1)
#X = self.normalizer.transform(X)
if self.dynamic_features == False:
if self.verbose > 0:
self.logger("training the model")
random_search = sklearn.grid_search.RandomizedSearchCV(model, param_distributions=param_dist,
n_iter=100, n_jobs=self.n_jobs, cv=10,
verbose=0)
random_search.fit(self.Xtrain[:, self.features], self.ytrain)
results = list()
for i in range(X.shape[0]):
results.append({'model': random_search, 'confidences': None,
'selected_features': None,
'prediction': random_search.predict(X[i, self.features].reshape(1, -1)),
'decision_function': random_search.decision_function(X[i, self.features].reshape(1, -1))})
if self.verbose > 0:
self.logger("predict done.")
return results
results = list()
for i in range(X.shape[0]):
if self.verbose > 0:
self.logger("Sample %d / %d" % (i+1, X.shape[0]))
if self.verbose > 0:
self.logger("Selecting high confidence features")
confidences = {f: self.FCEs[f].getConfidence(X[i, ]) for f in self.features}
if self.verbose > 2:
self.logger(confidences)
max_confidence = max(confidences.values())
min_confidence = min(confidences.values())
if self.verbose > 1:
self.logger("Max and min confidences: %f, %f" % (max_confidence, min_confidence))
best_threshold = None
best_score = -float("inf")
for threshold in [0.2, 0.4, 0.6, 0.8]:
selected_features = [key for (key, value) in confidences.items()
if value > min_confidence + (max_confidence - min_confidence) * threshold]
if self.verbose > 2:
self.logger("Selected features and their confidences:")
self.logger([(key, confidences[key]) for key in selected_features])
random_search = sklearn.grid_search.RandomizedSearchCV(model, param_distributions=param_dist,
n_iter=100, n_jobs=self.n_jobs, cv=10,
verbose=0)
random_search.fit(self.Xtrain[:, selected_features], self.ytrain)
if random_search.best_score_ > best_score:
best_score = random_search.best_score_
best_threshold = threshold
if self.verbose > 0:
self.logger("score, threshold: %f, %g" % (random_search.best_score_, threshold))
if self.verbose > 1:
self.logger("Selected threshold: %g" % (best_threshold))
selected_features = [key for (key, value) in confidences.items()
if value > min_confidence + (max_confidence - min_confidence) * best_threshold]
if self.verbose > 2:
self.logger("Selected features and their confidences:")
self.logger([(key, confidences[key]) for key in selected_features])
random_search = sklearn.grid_search.RandomizedSearchCV(model, param_distributions=param_dist,
n_iter=100, n_jobs=self.n_jobs, cv=10,
verbose=0)
random_search.fit(self.Xtrain[:, selected_features], self.ytrain)
results.append({'model': random_search, 'confidences': confidences,
'selected_features': selected_features,
'prediction': random_search.predict(X[i, selected_features].reshape(1, -1)),
'decision_function': random_search.decision_function(X[i, selected_features].reshape(1, -1))})
return results
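# Hedged standalone sketch (not part of Raccoon): the candidate-feature step in
# fit() keeps only features whose Pearson p-value stays below the threshold in
# every fold. The toy data and the plain 3-fold split below are made up.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    Xt = rng.randn(90, 20)
    yt = Xt[:, 0] + 0.1 * rng.randn(90)          # only feature 0 is informative
    kept = set(range(Xt.shape[1]))
    for fold in np.array_split(np.arange(90), 3):
        train_idx = np.setdiff1d(np.arange(90), fold)
        pvals = np.array([scipy.stats.stats.pearsonr(yt[train_idx], Xt[train_idx, j])[1]
                          for j in range(Xt.shape[1])])
        kept &= set(np.where(pvals < 0.05)[0])
    print(sorted(kept))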
|
gpl-3.0
|
tboudreaux/PHY-2002
|
General.py
|
1
|
2497
|
import platform
import matplotlib as mpl
import imp
import math
import numpy as np
class PreChecks(object):
@staticmethod
def oscheck():
operatings = platform.system()
print('Checking Operating System')
if operatings == 'Windows':
print('Program does not run on Windows machines, please use a UNIX Like system to run program')
mac = False
exit()
elif operatings == 'Darwin':
mpl.interactive(True)
mac = True
print('OS OK')
else:
print('OS OK')
mac = False
return mac
@staticmethod
def modimport():
        print('Checking Modules')
foundall = True
modarray = ['astropy', 'numpy', 'matplotlib', 'sys', 'os', 'PyQt4', 'SecondGui', 'webbrowser', 'GuiFunction', 'BPlot']
for i in range(len(modarray)):
try:
imp.find_module(modarray[i])
                print(modarray[i] + ' OK')
            except ImportError:
                print('Error ' + modarray[i] + ' is not installed')
foundall = False
return foundall
class Mathamatics(object):
@staticmethod
def smallest(array):
smallestnum = array[0]
for i in range(len(array)):
if array[i] < smallestnum:
smallestnum = array[i]
return smallestnum
@staticmethod
def largest(array):
largestnum = array[0]
for i in range(len(array)):
if array[i] > largestnum:
largestnum = array[i]
return largestnum
@staticmethod
def mag3D(vector):
mag = math.sqrt(vector[0]**2 + vector[1]**2 + vector[2]**2)
return mag
@staticmethod
def median(listi):
return np.median(np.array(listi))
@staticmethod
def stdev(array):
total = 0
mean = sum(array)/len(array)
for i in range(len(array)):
diff = array[i] - mean
diff *= diff
total += diff
total /= (len(array)-1)
return math.sqrt(total)
class DataStructures(object):
def __init__(self, SpectracType, Temperature, comparison):
self.comparison = dict()
@property
def spectralComp(self):
self.comparison = {'B0': 30000, 'B5': 16400, 'A0': 10800, 'A5': 8620, 'F0': 7240, 'F5': 6540, 'G0': 5920,
'G5': 5610, 'K0': 5240, 'K5': 4410, 'M0': 3920, 'M5': 3120}
return self.comparison
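# Hedged quick check (not part of the original script): Mathamatics.stdev
# implements the sample standard deviation, so it should agree with
# numpy.std(..., ddof=1); the sample values are arbitrary.
if __name__ == '__main__':
    sample = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
    print('custom: %.6f  numpy: %.6f' % (Mathamatics.stdev(sample), np.std(sample, ddof=1)))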
|
lgpl-3.0
|
hagberg/nx3k
|
convert.py
|
1
|
14268
|
"""Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it returns a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
    data : an object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
         dict-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
# if hasattr(data,"adj"):
# try:
# result= from_dict_of_dicts(data.adj,\
# create_using=create_using,\
# multigraph_input=data.is_multigraph())
# if hasattr(data,'graph'): # data.graph should be dict-like
# result.graph.update(data.graph)
# if hasattr(data,'node'): # data.node should be dict-like
# result.node.update( (n,dd.copy()) for n,dd in data.node.items() )
# return result
# except:
# raise nx.NetworkXError("Input is not a correct NetworkX graph.")
if hasattr(data,"_adjacency"):
# try:
result= from_dict_of_dicts(data._adjacency,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'data'): # data.graph should be dict-like
result.data.update(data.data)
if hasattr(data,'_nodedata'): # data.node should be dict-like
result._nodedata.update( (n,dd.copy()) for n,dd in data._nodedata.items() )
return result
# except:
# raise nx.NetworkXError("Input is not a correct NetworkX graph.")
if hasattr(data, "_nodes"):
result= from_dict_of_dicts(data.a,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'data'): # data.graph should be dict-like
result.data.update(data.data)
if hasattr(data,'_nodes'): # data.node should be dict-of-dict-like
result._nodes.update( (n,dd.copy()) for n,dd in data.n.items() )
return result
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.nx_agraph.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or isinstance(data,tuple)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# Pandas DataFrame
try:
import pandas as pd
if isinstance(data, pd.DataFrame):
try:
return nx.from_pandas_dataframe(data, create_using=create_using)
except:
msg = "Input is not a correct Pandas DataFrame."
raise nx.NetworkXError(msg)
except ImportError:
msg = 'pandas not found, skipping conversion test.'
warnings.warn(msg, ImportWarning)
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return nx.from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return nx.from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
return
def convert_to_undirected(G):
"""Return a new undirected representation of the graph G."""
return G.to_undirected()
def convert_to_directed(G):
"""Return a new directed representation of the graph G."""
return G.to_directed()
def to_dict_of_lists(G,nodelist=None):
"""Return adjacency representation of graph as a dictionary of lists.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
Notes
-----
Completely ignores edge data for MultiGraph and MultiDiGraph.
"""
if nodelist is None:
nodelist=G
d = {}
for n in nodelist:
d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
return d
def from_dict_of_lists(d,create_using=None):
"""Return a graph from a dictionary of lists.
Parameters
----------
d : dictionary of lists
A dictionary of lists adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> dol= {0:[1]} # single edge (0,1)
>>> G=nx.from_dict_of_lists(dol)
or
>>> G=nx.Graph(dol) # use Graph constructor
"""
G=_prep_create_using(create_using)
# G.add_nodes_from(d)
G.n.update(d)
if G.is_multigraph() and not G.is_directed():
# a dict_of_lists can't show multiedges. BUT for undirected graphs,
# each edge shows up twice in the dict_of_lists.
# So we need to treat this case separately.
seen={}
for node,nbrlist in d.items():
for nbr in nbrlist:
if nbr not in seen:
G.add_edge(node,nbr)
seen[node]=1 # don't allow reverse edge to show up
else:
G.e.update( ((node,nbr) for node,nbrlist in d.items()
for nbr in nbrlist) )
return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
"""Return adjacency representation of graph as a dictionary of dictionaries.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
edge_data : list, optional
If provided, the value of the dictionary will be
set to edge_data for all edges. This is useful to make
an adjacency matrix type representation with 1 as the edge data.
If edgedata is None, the edgedata in G is used to fill the values.
If G is a multigraph, the edgedata is a dict for each pair (u,v).
"""
dod={}
if nodelist is None:
if edge_data is None:
for u,nbrdict in G.adjacency():
dod[u]=nbrdict.copy()
else: # edge_data is not None
for u,nbrdict in G.adjacency():
dod[u]=dod.fromkeys(nbrdict, edge_data)
else: # nodelist is not None
if edge_data is None:
for u in nodelist:
dod[u]={}
for v,data in ((v,data) for v,data in G[u].items() if v in nodelist):
dod[u][v]=data
else: # nodelist and edge_data are not None
for u in nodelist:
dod[u]={}
for v in ( v for v in G[u] if v in nodelist):
dod[u][v]=edge_data
return dod
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
# G.add_nodes_from(d)
G.n.update(d)
# is dict a MultiGraph or MultiDiGraph?
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.e.update( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.e.update( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.e.update( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.e.update( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
G.e.add(u,v,attr_dict=data)
seen.add((v,u))
else:
G.e.update( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
def to_edgelist(G,nodelist=None):
"""Return a list of edges in the graph.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
"""
if nodelist is None:
return G.edges(data=True)
else:
return G.edges(nodelist,data=True)
def from_edgelist(edgelist,create_using=None):
"""Return a graph from a list of edges.
Parameters
----------
edgelist : list or iterator
Edge tuples
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> edgelist= [(0,1)] # single edge (0,1)
>>> G=nx.from_edgelist(edgelist)
or
>>> G=nx.Graph(edgelist) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.e.update(edgelist)
return G
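# Hedged usage sketch (not part of the original module), assuming the standard
# networkx Graph constructor and accessor API are available:
if __name__ == '__main__':
    G = nx.Graph([(0, 1), (1, 2)])               # two-edge path graph
    print(to_dict_of_lists(G))                   # e.g. {0: [1], 1: [0, 2], 2: [1]}
    print(list(to_edgelist(G)))                  # e.g. [(0, 1, {}), (1, 2, {})]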
|
bsd-3-clause
|
dalejung/ibis
|
ibis/tasks.py
|
7
|
7836
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from cPickle import loads as pickle_load
from ibis.cloudpickle import dumps as pickle_dump
from ibis.wire import PackedMessageReader, PackedMessageWriter
import ibis.wire as wire
try:
import ibis.comms as comms
except ImportError:
pass
class IbisTaskMessage(object):
"""
Prototype wire protocol for task descriptions
uint32_t semaphore_id
uint32_t shmem_name_len
char* shmem_name
uint64_t shmem_offset
uint64_t shmem_size
"""
def __init__(self, semaphore_id, shmem_name, shmem_offset, shmem_size):
self.semaphore_id = semaphore_id
self.shmem_name = shmem_name
self.shmem_offset = shmem_offset
self.shmem_size = shmem_size
@classmethod
def decode(self, message):
"""
Convert from the bytestring wire protocol
Parameters
----------
message : bytes
Returns
-------
message : IbisTaskMessage
"""
buf = PackedMessageReader(message)
sem_id = buf.uint32()
shmem_name = buf.string()
shmem_offset = buf.uint64()
shmem_size = buf.uint64()
return IbisTaskMessage(sem_id, shmem_name, shmem_offset, shmem_size)
def encode(self):
"""
Format this message as a bytestring according to the current version of
the wire protocol.
Returns
-------
encoded : bytes
"""
buf = PackedMessageWriter()
buf.uint32(self.semaphore_id)
buf.string(self.shmem_name)
buf.uint64(self.shmem_offset)
buf.uint64(self.shmem_size)
return buf.get_result()
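# Hedged illustration (not the real ibis.wire implementation): the header layout
# documented above, packed with the struct module, assuming little-endian
# integers and a uint32 length prefix for the shared-memory name.
if __name__ == '__main__':
    import struct
    name = b'/ibis-shm-0'
    packed = struct.pack('<II%dsQQ' % len(name), 7, len(name), name, 0, 4096)
    sem_id, name_len = struct.unpack_from('<II', packed, 0)
    print('semaphore_id=%d shmem_name=%s' % (sem_id,
          struct.unpack_from('<%ds' % name_len, packed, 8)[0]))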
class Task(object):
"""
Prototype
Run task in a thread, capture tracebacks or other problems.
"""
def __init__(self, shmem):
self.shmem = shmem
self.complete = False
def mark_success(self):
wire.write_uint8(self.shmem, 1)
def mark_failure(self):
wire.write_uint8(self.shmem, 0)
def execute(self):
pass
def run(self):
raise NotImplementedError
def done(self):
pass
_task_registry = {}
def register_task(kind, task_class, override=False):
"""
Register a new task implementation with the execution system
"""
if kind in _task_registry and not override:
raise KeyError('Task of type %s is already defined and '
'override is False')
_task_registry[kind] = task_class
class IbisTaskExecutor(object):
"""
Runs the requested task and handles locking, exception reporting, and so
forth.
"""
def __init__(self, task_msg):
self.task_msg = task_msg
self.lock = comms.IPCLock(self.task_msg.semaphore_id)
self.shmem = comms.SharedMmap(self.task_msg.shmem_name,
self.task_msg.shmem_size,
offset=self.task_msg.shmem_offset)
def _cycle_ipc_lock(self):
# TODO: I put this here as a failsafe in case the task needs to bail
# out for a known reason and we want to immediately release control to
# the master process
self.lock.acquire()
self.lock.release()
def execute(self):
# TODO: Timeout concerns
self.lock.acquire()
# TODO: this can break in various ways on bad input
task_type = wire.read_string(self.shmem)
try:
klass = _task_registry[task_type]
task = klass(self.shmem)
task.run()
except:
self.shmem.seek(0)
# XXX: Failure indicator
wire.write_uint8(self.shmem, 0)
tb = traceback.format_exc()
# HACK: Traceback string must be truncated so it will fit in the
# shared memory (along with the uint32 length prefix)
if len(tb) + 5 > len(self.shmem):
tb = tb[:len(self.shmem) - 5]
wire.write_string(self.shmem, tb)
finally:
self.lock.release()
# ---------------------------------------------------------------------
# Ping pong task for testing
class PingPongTask(Task):
def run(self):
self.shmem.seek(0)
self.mark_success()
wire.write_string(self.shmem, 'pong')
register_task('ping', PingPongTask)
# ---------------------------------------------------------------------
# Aggregation execution tasks
class AggregationTask(Task):
def _write_response(self, agg_inst):
self.shmem.seek(0)
self.mark_success()
serialized_inst = pickle_dump(agg_inst)
wire.write_string(self.shmem, serialized_inst)
class AggregationUpdateTask(AggregationTask):
"""
Task header layout
- serialized agg class
- prior state flag 1/0
- (optional) serialized prior state
- serialized table fragment
"""
def __init__(self, shmem):
AggregationTask.__init__(self, shmem)
self._read_header()
def _read_header(self):
reader = wire.PackedMessageReader(self.shmem)
# Unpack header
self.agg_class_pickled = reader.string()
has_prior_state = reader.uint8() != 0
if has_prior_state:
self.prior_state = pickle_load(reader.string())
else:
self.prior_state = None
def run(self):
if self.prior_state is not None:
agg_inst = self.prior_state
else:
klass = pickle_load(self.agg_class_pickled)
agg_inst = klass()
args = self._deserialize_args()
agg_inst.update(*args)
self._write_response(agg_inst)
def _deserialize_args(self):
# TODO: we need some mechanism to indicate how the data should be
# deserialized before passing to the aggregator. For now, will assume
# "pandas-friendly" NumPy-format
# Deserialize data fragment
table_reader = comms.IbisTableReader(self.shmem)
args = []
for i in range(table_reader.ncolumns):
col = table_reader.get_column(i)
arg = col.to_numpy_for_pandas()
args.append(arg)
return args
class AggregationMergeTask(AggregationTask):
def __init__(self, shmem):
AggregationTask.__init__(self, shmem)
reader = wire.PackedMessageReader(shmem)
# TODO: may wish to merge more than 2 at a time?
# Unpack header
self.left_inst = pickle_load(reader.string())
self.right_inst = pickle_load(reader.string())
def run(self):
# Objects to merge stored in length-prefixed strings in shared memory
merged = self.left_inst.merge(self.right_inst)
self._write_response(merged)
class AggregationFinalizeTask(AggregationTask):
def __init__(self, shmem):
AggregationTask.__init__(self, shmem)
reader = wire.PackedMessageReader(shmem)
self.state = pickle_load(reader.string())
def run(self):
# Single length-prefixed string to finalize
result = self.state.finalize()
self._write_response(result)
register_task('agg-update', AggregationUpdateTask)
register_task('agg-merge', AggregationMergeTask)
register_task('agg-finalize', AggregationFinalizeTask)
|
apache-2.0
|
davidam/python-examples
|
pandas/pandas-dataframe.py
|
1
|
1157
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import pandas as pd
data = {
'Country': ['Belgium', 'India', 'Brazil'],
'Capital': ['Brussels', 'New Delhi', 'Brasilia']
}
df = pd.DataFrame(data,columns=['Country', 'Capital', 'Population'])
print(df)
print(list(df.columns.values))
|
gpl-3.0
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/examples/mplot3d/lorenz_attractor.py
|
3
|
1243
|
# Plot of the Lorenz Attractor based on Edward Lorenz's 1963 "Deterministic
# Nonperiodic Flow" publication.
# http://journals.ametsoc.org/doi/abs/10.1175/1520-0469%281963%29020%3C0130%3ADNF%3E2.0.CO%3B2
#
# Note: Because this is a simple non-linear ODE, it would be more easily
# done using SciPy's ode solver, but this approach depends only
# upon NumPy.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def lorenz(x, y, z, s=10, r=28, b=2.667) :
x_dot = s*(y - x)
y_dot = r*x - y - x*z
z_dot = x*y - b*z
return x_dot, y_dot, z_dot
dt = 0.01
stepCnt = 10000
# Need one more for the initial values
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
xs[0], ys[0], zs[0] = (0., 1., 1.05)
# Stepping through "time".
for i in xrange(stepCnt) :
# Derivatives of the X, Y, Z state
x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(xs, ys, zs)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
plt.show()
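# Hedged alternative (not part of the original example): as noted above, the
# same trajectory can be produced with SciPy's ODE machinery; this sketch uses
# scipy.integrate.solve_ivp and assumes SciPy is installed.
from scipy.integrate import solve_ivp
sol = solve_ivp(lambda t, s: lorenz(s[0], s[1], s[2]),
                (0.0, stepCnt * dt), [xs[0], ys[0], zs[0]],
                t_eval=np.arange(0.0, stepCnt * dt, dt))
fig2 = plt.figure()
ax2 = fig2.add_subplot(111, projection='3d')
ax2.plot(sol.y[0], sol.y[1], sol.y[2])
ax2.set_title("Lorenz attractor via solve_ivp")
plt.show()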
|
gpl-2.0
|
ankurankan/scikit-learn
|
examples/hetero_feature_union.py
|
288
|
6236
|
"""
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
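# Hedged mini-check (not in the original example): ItemSelector simply indexes
# a mapping by key, exactly as its docstring describes.
toy = {'a': [1, 5, 2], 'b': [9, 4, 1]}
print(ItemSelector(key='a').transform(toy))  # [1, 5, 2]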
|
bsd-3-clause
|
srio/shadow3-scripts
|
COMSOL/test1.py
|
1
|
2706
|
# https://numerical-analysis.readthedocs.io/en/latest/Interpolation/2D_Interpolation.html
# Setup
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
params = {'font.size' : 14,
'figure.figsize':(15.0, 8.0),
'lines.linewidth': 2.,
'lines.markersize': 15,}
matplotlib.rcParams.update(params)
Ni = 40
Pi = np.random.rand(Ni, 2)
print(">>>>Pi",Pi.shape)
Xi, Yi = Pi[:,0], Pi[:,1]
Zi = np.random.rand(Ni)
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(Xi, Yi, Zi, "or", label='Data')
ax.legend()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
# triangulation
from scipy.spatial import Delaunay
Pi = np.array([Xi, Yi]).transpose()
tri = Delaunay(Pi)
plt.triplot(Xi, Yi , tri.simplices.copy())
plt.plot(Xi, Yi, "or", label = "Data")
plt.grid()
plt.legend()
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# interpolation
N = 100
x = np.linspace(0., 1., N)
y = np.linspace(0., 1., N)
X, Y = np.meshgrid(x, y)
P = np.array([X.flatten(), Y.flatten() ]).transpose()
plt.plot(Xi, Yi, "or", label = "Data")
plt.triplot(Xi, Yi , tri.simplices.copy())
plt.plot(X.flatten(), Y.flatten(), "g,", label = "Z = ?")
plt.legend()
plt.grid()
plt.show()
# nearest interpolation
from scipy.interpolate import griddata
Z_nearest = griddata(Pi, Zi, P, method = "nearest").reshape([N, N])
plt.contourf(X, Y, Z_nearest, 50)
plt.plot(Xi, Yi, "or", label = "Data")
plt.colorbar()
plt.legend()
plt.grid()
plt.show()
# linear interpolation
from scipy.interpolate import griddata
Z_linear = griddata(Pi, Zi, P, method = "linear").reshape([N, N])
plt.contourf(X, Y, Z_linear, 50, cmap = mpl.cm.jet)
plt.colorbar()
plt.contour(X, Y, Z_linear, 10, colors = "k")
#plt.triplot(Xi, Yi , tri.simplices.copy(), color = "k")
plt.plot(Xi, Yi, "or", label = "Data")
plt.legend()
plt.grid()
plt.show()
# higher order interpolation
from scipy.interpolate import griddata
Z_cubic = griddata(Pi, Zi, P, method = "cubic").reshape([N, N])
plt.contourf(X, Y, Z_cubic, 50, cmap = mpl.cm.jet)
plt.colorbar()
plt.contour(X, Y, Z_cubic, 20, colors = "k")
#plt.triplot(Xi, Yi , tri.simplices.copy(), color = "k")
plt.plot(Xi, Yi, "or", label = "Data")
plt.legend()
plt.grid()
plt.show()
# comparison/discussion
levels = np.linspace(0., 1., 50)
fig = plt.figure()
ax = fig.add_subplot(1, 3, 1)
plt.contourf(X, Y, Z_nearest, levels)
plt.grid()
ax = fig.add_subplot(1, 3, 2)
plt.contourf(X, Y, Z_linear, levels)
plt.grid()
ax = fig.add_subplot(1, 3, 3)
plt.contourf(X, Y, Z_cubic, levels)
plt.grid()
plt.plot
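# Hedged follow-up (not in the original script): one practical difference worth
# checking is that, unlike "nearest", the "linear" and "cubic" methods return
# NaN outside the convex hull of the data points.
print("NaN fraction  nearest: %.3f  linear: %.3f  cubic: %.3f"
      % (np.isnan(Z_nearest).mean(), np.isnan(Z_linear).mean(), np.isnan(Z_cubic).mean()))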
|
mit
|
qiwsir/vincent
|
examples/scatter_chart_examples.py
|
9
|
2130
|
# -*- coding: utf-8 -*-
"""
Vincent Scatter Examples
"""
#Build a Line Chart from scratch
from vincent import *
import pandas as pd
import pandas.io.data as web
import datetime
all_data = {}
date_start = datetime.datetime(2010, 1, 1)
date_end = datetime.datetime(2014, 1, 1)
for ticker in ['AAPL', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.DataReader(ticker, 'yahoo', date_start, date_end)
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
#Note that we're using timeseries, so x-scale type is "time". For non
#timeseries data, use "linear"
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='time', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', type='linear', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
transform = MarkRef(data='table',
transform=[Transform(type='facet', keys=['data.col'])])
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
fill=ValueRef(scale='color', field='data.col'),
size=ValueRef(value=10))
mark = Mark(type='group', from_=transform,
marks=[Mark(type='symbol',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
data = Data.from_pandas(price[['MSFT', 'AAPL']])
#Using a Vincent Keyed List here
vis.data['table'] = data
vis.axis_titles(x='Date', y='Price')
vis.legend(title='MSFT vs AAPL')
vis.to_json('vega.json')
#Convenience method
vis = Scatter(price[['MSFT', 'AAPL']])
vis.axis_titles(x='Date', y='Price')
vis.legend(title='MSFT vs AAPL')
vis.colors(brew='RdBu')
vis.to_json('vega.json')
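# Hedged offline sketch (added for illustration, not part of the original
# example): pandas.io.data has since been removed from pandas, so the same
# convenience chart can be exercised with locally generated data instead of
# the Yahoo! download above. The column names are kept only for symmetry.
import numpy as np
dates = pd.date_range('2010-01-01', periods=100)
fake_price = pd.DataFrame({'MSFT': np.random.rand(100).cumsum(),
                           'AAPL': np.random.rand(100).cumsum()}, index=dates)
vis = Scatter(fake_price)
vis.axis_titles(x='Date', y='Price')
vis.legend(title='MSFT vs AAPL (synthetic)')
vis.to_json('vega_offline.json')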
|
mit
|
nickdex/cosmos
|
code/computer_graphics/src/diamond_square/diamond_square.py
|
3
|
3544
|
import numpy as np
def show_as_height_map(height, mat):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
x, y = np.meshgrid(np.arange(height), np.arange(height))
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(x, y, mat)
plt.title("height map")
plt.show()
def update_pixel(pixel, mean, magnitude):
return mean + (2 * pixel * magnitude) - magnitude
def main(n, smooth_factor, plot_enable):
height = (1 << n) + 1
mat = np.random.random((height, height))
i = height - 1
magnitude = 1
# seeds init
mat[0, 0] = update_pixel(mat[0, 0], 0, magnitude)
mat[0, height - 1] = update_pixel(mat[0, height - 1], 0, magnitude)
mat[height - 1, height - 1] = update_pixel(
mat[height - 1, height - 1], 0, magnitude
)
    mat[height - 1, 0] = update_pixel(mat[height - 1, 0], 0, magnitude)  # seed the fourth corner (was a duplicate of the second)
while i > 1:
id_ = i >> 1
magnitude *= smooth_factor
for xIndex in range(id_, height, i): # Beginning of the Diamond Step
for yIndex in range(id_, height, i):
mean = (
mat[xIndex - id_, yIndex - id_]
+ mat[xIndex - id_, yIndex + id_]
+ mat[xIndex + id_, yIndex + id_]
+ mat[xIndex + id_, yIndex - id_]
) / 4
mat[xIndex, yIndex] = update_pixel(mat[xIndex, yIndex], mean, magnitude)
for xIndex in range(0, height, id_): # Beginning of the Square Step
if xIndex % i == 0:
shift = id_
else:
shift = 0
for yIndex in range(shift, height, i):
sum_ = 0
n = 0
if xIndex >= id_:
sum_ += mat[xIndex - id_, yIndex]
n += 1
if xIndex + id_ < height:
sum_ += mat[xIndex + id_, yIndex]
n += 1
if yIndex >= id_:
sum_ += mat[xIndex, yIndex - id_]
n += 1
if yIndex + id_ < height:
sum_ += mat[xIndex, yIndex + id_]
n += 1
mean = sum_ / n
mat[xIndex, yIndex] = update_pixel(mat[xIndex, yIndex], mean, magnitude)
i = id_
if plot_enable:
show_as_height_map(height, mat)
return mat
def check_smooth_factor(value):
fvalue = float(value)
if fvalue < 0 or fvalue > 1:
raise argparse.ArgumentTypeError("%s is an invalid smooth factor value" % value)
return fvalue
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
return ivalue
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="machine generated calculation")
parser.add_argument(
"-n",
help="the size of the image will be 2**n + 1",
required=False,
default=8,
type=check_positive,
)
parser.add_argument(
"-s",
help="smooth factor, needs to be in range of [0, 1], value of 0 means image is very smooth,"
"value of 1 means image is very rough",
required=False,
default=0.5,
type=check_smooth_factor,
)
parser.add_argument("-p", help="plot with matplotlib", action="store_true")
args = parser.parse_args()
main(args.n, args.s, args.p)
|
gpl-3.0
|
imaculate/scikit-learn
|
examples/mixture/plot_gmm_covariances.py
|
11
|
4723
|
"""
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0))
for cov_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
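# Hedged side note (added for illustration, not part of the original example):
# the covariance types trade flexibility against parameter count. For
# d-dimensional data and k components, the covariance parameters alone number
# roughly k (spherical), k*d (diag), d*(d+1)/2 (tied) and k*d*(d+1)/2 (full),
# which is one way to see why 'full' overfits most easily on small datasets.
d, k = iris.data.shape[1], n_classes
cov_param_counts = {'spherical': k,
                    'diag': k * d,
                    'tied': d * (d + 1) // 2,
                    'full': k * d * (d + 1) // 2}
print(cov_param_counts)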
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/groupby/test_groupby_subclass.py
|
2
|
2669
|
from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"obj",
[
tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
tm.SubclassedSeries(np.arange(0, 10), name="A"),
],
)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_groupby_preserves_subclass(obj, groupby_func):
# GH28330 -- preserve subclass through groupby operations
if isinstance(obj, Series) and groupby_func in {"corrwith"}:
pytest.skip("Not applicable")
grouped = obj.groupby(np.arange(0, 10))
# Groups should preserve subclass type
assert isinstance(grouped.get_group(0), type(obj))
args = []
if groupby_func in {"fillna", "nth"}:
args.append(0)
elif groupby_func == "corrwith":
args.append(obj)
elif groupby_func == "tshift":
args.extend([0, 0])
result1 = getattr(grouped, groupby_func)(*args)
result2 = grouped.agg(groupby_func, *args)
# Reduction or transformation kernels should preserve type
slices = {"ngroup", "cumcount", "size"}
if isinstance(obj, DataFrame) and groupby_func in slices:
assert isinstance(result1, obj._constructor_sliced)
else:
assert isinstance(result1, type(obj))
# Confirm .agg() groupby operations return same results
if isinstance(result1, DataFrame):
tm.assert_frame_equal(result1, result2)
else:
tm.assert_series_equal(result1, result2)
def test_groupby_preserves_metadata():
# GH-37343
custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]})
assert "testattr" in custom_df._metadata
custom_df.testattr = "hello"
for _, group_df in custom_df.groupby("c"):
assert group_df.testattr == "hello"
@pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame])
def test_groupby_resample_preserves_subclass(obj):
# GH28330 -- preserve subclass through groupby.resample()
df = obj(
{
"Buyer": "Carl Carl Carl Carl Joe Carl".split(),
"Quantity": [18, 3, 5, 1, 9, 3],
"Date": [
datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0),
],
}
)
df = df.set_index("Date")
# Confirm groupby.resample() preserves dataframe type
result = df.groupby("Buyer").resample("5D").sum()
assert isinstance(result, obj)
|
bsd-3-clause
|
flightgong/scikit-learn
|
sklearn/utils/graph.py
|
50
|
6169
|
"""
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source]  # list of nodes to check at the next level
while next_level:
this_level = next_level # advance to next level
        next_level = set()  # and start a new fringe set for the next level
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
    # np.int / np.uint / np.float aliases were removed from recent NumPy
    if normed and (np.issubdtype(csgraph.dtype, np.signedinteger)
                   or np.issubdtype(csgraph.dtype, np.unsignedinteger)):
        csgraph = csgraph.astype(np.float64)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
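if __name__ == "__main__":
    # Usage sketch (added for illustration, not part of the original module):
    # the combinatorial Laplacian of a 3-node path graph shows the usual
    # degree-on-the-diagonal / minus-adjacency-off-diagonal structure.
    adjacency = np.array([[0., 1., 0.],
                          [1., 0., 1.],
                          [0., 1., 0.]])
    lap, diag = graph_laplacian(adjacency, normed=False, return_diag=True)
    print(lap)   # expected: [[ 1. -1.  0.] [-1.  2. -1.] [ 0. -1.  1.]]
    print(diag)  # expected: [1. 2. 1.]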
|
bsd-3-clause
|
pravsripad/mne-python
|
examples/visualization/plot_topo_compare_conditions.py
|
20
|
1828
|
"""
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks='meg', baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
|
bsd-3-clause
|
qhw0820/blog
|
DMPs/dmp_discrete.py
|
4
|
5988
|
'''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from dmp import DMPs
import numpy as np
class DMPs_discrete(DMPs):
"""An implementation of discrete DMPs"""
def __init__(self, **kwargs):
"""
"""
# call super class constructor
super(DMPs_discrete, self).__init__(pattern='discrete', **kwargs)
self.gen_centers()
# set variance of Gaussian basis functions
# trial and error to find this spacing
self.h = np.ones(self.bfs) * self.bfs**1.5 / self.c
self.check_offset()
def gen_centers(self):
"""Set the centre of the Gaussian basis
functions be spaced evenly throughout run time"""
'''x_track = self.cs.discrete_rollout()
t = np.arange(len(x_track))*self.dt
# choose the points in time we'd like centers to be at
c_des = np.linspace(0, self.cs.run_time, self.bfs)
self.c = np.zeros(len(c_des))
for ii, point in enumerate(c_des):
diff = abs(t - point)
self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]'''
# desired spacings along x
# need to be spaced evenly between 1 and exp(-ax)
# lowest number should be only as far as x gets
first = np.exp(-self.cs.ax*self.cs.run_time)
last = 1.05 - first
des_c = np.linspace(first,last,self.bfs)
self.c = np.ones(len(des_c))
for n in range(len(des_c)):
# x = exp(-c), solving for c
self.c[n] = -np.log(des_c[n])
def gen_front_term(self, x, dmp_num):
"""Generates the diminishing front term on
the forcing term.
x float: the current value of the canonical system
dmp_num int: the index of the current dmp
"""
return x * (self.goal[dmp_num] - self.y0[dmp_num])
def gen_goal(self, y_des):
"""Generate the goal for path imitation.
        For discrete DMPs the goal is the final point of the
        desired trajectory.
y_des np.array: the desired trajectory to follow
"""
return y_des[:,-1].copy()
    def gen_psi(self, x):
        """Generates the activity of the basis functions for a given
        state or rollout of the canonical system.
        x float or np.array: the canonical system state or path
        """
        # a single definition handles both a scalar state and a full rollout;
        # broadcasting against the centers covers the array case
        if isinstance(x, np.ndarray):
            x = x[:, None]
        return np.exp(-self.h * (x - self.c)**2)
def gen_weights(self, f_target):
"""Generate a set of weights over the basis functions such
that the target forcing term trajectory is matched.
f_target np.array: the desired forcing term trajectory
"""
# calculate x and psi
x_track = self.cs.rollout()
psi_track = self.gen_psi(x_track)
#efficiently calculate weights for BFs using weighted linear regression
self.w = np.zeros((self.dmps, self.bfs))
for d in range(self.dmps):
# spatial scaling term
k = (self.goal[d] - self.y0[d])
for b in range(self.bfs):
numer = np.sum(x_track * psi_track[:,b] * f_target[:,d])
denom = np.sum(x_track**2 * psi_track[:,b])
self.w[d,b] = numer / (k * denom)
#==============================
# Test code
#==============================
if __name__ == "__main__":
# test normal run
dmp = DMPs_discrete(dmps=1, bfs=10, w=np.zeros((1,10)))
y_track,dy_track,ddy_track = dmp.rollout()
import matplotlib.pyplot as plt
plt.figure(1, figsize=(6,3))
plt.plot(np.ones(len(y_track))*dmp.goal, 'r--', lw=2)
plt.plot(y_track, lw=2)
plt.title('DMP system - no forcing term')
plt.xlabel('time (ms)')
plt.ylabel('system trajectory')
plt.legend(['goal', 'system state'], loc='lower right')
plt.tight_layout()
# test imitation of path run
plt.figure(2, figsize=(6,4))
num_bfs = [10, 30, 50, 100, 10000]
# a straight line to target
path1 = np.sin(np.arange(0,1,.01)*5)
# a strange path to target
path2 = np.zeros(path1.shape)
    path2[len(path2) // 2:] = .5  # integer index (float slice indices fail in Python 3)
for ii, bfs in enumerate(num_bfs):
dmp = DMPs_discrete(dmps=2, bfs=bfs)
dmp.imitate_path(y_des=np.array([path1, path2]))
# change the scale of the movement
dmp.goal[0] = 3; dmp.goal[1] = 2
y_track,dy_track,ddy_track = dmp.rollout()
plt.figure(2)
plt.subplot(211)
plt.plot(y_track[:,0], lw=2)
plt.subplot(212)
plt.plot(y_track[:,1], lw=2)
plt.subplot(211)
a = plt.plot(path1 / path1[-1] * dmp.goal[0], 'r--', lw=2)
plt.title('DMP imitate path')
plt.xlabel('time (ms)')
plt.ylabel('system trajectory')
plt.legend([a[0]], ['desired path'], loc='lower right')
plt.subplot(212)
b = plt.plot(path2 / path2[-1] * dmp.goal[1], 'r--', lw=2)
plt.title('DMP imitate path')
plt.xlabel('time (ms)')
plt.ylabel('system trajectory')
plt.legend(['%i BFs'%i for i in num_bfs], loc='lower right')
plt.tight_layout()
plt.show()
|
gpl-3.0
|
mjvakili/supermean
|
code/matrix.py
|
1
|
1525
|
import sampler
import profile
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
M , H = 25 , 3
y = profile.makeGaussian(H*M, H*M/6. ,0.1, (H*M/2.+0.83*H, H*M/2.+1.45*H))
hx, hy, hf = sampler.test_imatrix_new(M, H, -1.45, -0.83)
qw = hx.dot(y).dot(hy)
qq = y.flatten().dot(hf).reshape(M,M)
plt.subplot(1,3,1)
plt.imshow(y, interpolation = "None" , origin = "lower")
plt.colorbar()
plt.subplot(1,3,2)
plt.imshow(qw, interpolation = "None" , origin = "lower")
plt.colorbar()
plt.subplot(1,3,3)
plt.imshow(qq, interpolation = "None" , origin = "lower")
plt.colorbar()
plt.show()
M , H = 25 , 3
y = profile.makeGaussian(M, M/5. ,0.1, (M/2.+0.83, M/2.+1.45))
y /= y.sum()
import c3
print(c3.find_centroid(y))  # data
model = profile.makeGaussian(H*M, H*M/5. ,0.1, (H*M/2., H*M/2.)) #model
hh = sampler.imatrix(y.flatten() , H) #sampling matrix
ww = model.flatten().dot(hh).reshape(M,M) #model rendered on data grid
ww = ww/ww.sum()
plt.subplot(1,4,1)
plt.imshow(y, interpolation = "None" , origin = "lower")
plt.title("data")
plt.colorbar()
plt.subplot(1,4,2)
plt.imshow(model, interpolation = "None" , origin = "lower")
plt.title("model")
plt.colorbar()
plt.subplot(1,4,3)
plt.imshow(ww, interpolation = "None" , origin = "lower")
plt.title("rendered model")
plt.colorbar()
plt.subplot(1,4,4)
plt.imshow(ww-y, interpolation = "None" , origin = "lower")
plt.title("residual")
plt.colorbar()
plt.show()
|
mit
|
alexsavio/scikit-learn
|
sklearn/datasets/svmlight_format.py
|
41
|
16768
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id : array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.int64)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features : int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
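if __name__ == "__main__":
    # Minimal round-trip sketch (added for illustration, not part of the
    # original module): dump a tiny dense matrix to the svmlight format
    # described above and load it back through an in-memory binary buffer.
    from io import BytesIO
    X_demo = np.array([[0.0, 1.5, 0.0],
                       [2.0, 0.0, 3.0]])
    y_demo = np.array([1, 0])
    buf = BytesIO()
    dump_svmlight_file(X_demo, y_demo, buf, zero_based=True)
    buf.seek(0)
    X_back, y_back = load_svmlight_file(buf, zero_based=True)
    print(X_back.toarray())  # only the non-zero entries of X_demo are stored
    print(y_back)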
|
bsd-3-clause
|
sjyk/activedetect
|
benchmarks/Benchmark.py
|
1
|
5095
|
#!/usr/bin/env python
from activedetect.loaders.csv_loader import CSVLoader
from activedetect.error_detectors.ErrorDetector import ErrorDetector
from activedetect.loaders.type_inference import LoLTypeInference
from activedetect.model_based.preprocessing_utils import *
from activedetect.error_detectors.QuantitativeErrorModule import QuantitativeErrorModule
from activedetect.error_detectors.StringSimilarityErrorModule import StringSimilarityErrorModule
from activedetect.error_detectors.SemanticErrorModule import SemanticErrorModule
from activedetect.error_detectors.CharSimilarityErrorModule import CharSimilarityErrorModule
from activedetect.error_detectors.PuncErrorModule import PuncErrorModule
import numpy as np
import pickle
from sklearn.covariance import MinCovDet
from sklearn.svm import OneClassSVM
from sklearn.ensemble import IsolationForest
import datetime
"""
This is an abstract class that defines a benchmark
"""
class Benchmark():
def getDataset(self):
raise NotImplemented("Loads the data from some source")
def getQuantitativeConfig(self):
raise NotImplemented("Returns config for quantitative detection")
def getADConfig(self):
q_detect = QuantitativeErrorModule
punc_detect = PuncErrorModule
config = [{'thresh': 10},
{}]
return ([q_detect, punc_detect], config)
def _groundTruth(self, dataset):
raise NotImplemented("Ground Truth Not Implemented")
def _quantitative(self, dataset):
config = self.getQuantitativeConfig()
e = ErrorDetector(dataset, modules=[QuantitativeErrorModule], config=config)
e.fit()
return set([error['cell'][0] for error in e])
def _missing(self, dataset):
e = ErrorDetector(dataset, modules=[PuncErrorModule], config=[{}])
e.fit()
return set([error['cell'][0] for error in e])
def _ad(self, dataset):
config = self.getADConfig()
e = ErrorDetector(dataset, modules=config[0], config=config[1])
e.fit()
#print set([error['cell_value'] for error in e])
return set([error['cell'][0] for error in e])
def pr(self, gt, s2):
tp = len(gt.intersection(s2))+0.0
fp = len([fp for fp in s2 if fp not in gt])
fn = len([fn for fn in gt if fn not in s2])
try:
return (tp/(tp + fp), tp/(tp+fn))
except:
return (0.0,0.0)
def _naiveMCD(self, dataset, thresh=3):
types = LoLTypeInference().getDataTypes(dataset)
qdataset = [ [d[i] for i,t in enumerate(types) if t =='numerical' ] for d in dataset]
X = featurize(qdataset, [t for t in types if t =='numerical'])
xshape = np.shape(X)
#for conditioning problems with the estimate
Xsamp = X + 0.01*np.random.randn(xshape[0],xshape[1])
m = MinCovDet()
m.fit(Xsamp)
sigma = np.linalg.inv(m.covariance_)
mu = np.mean(X, axis=0)
results = []
for i in range(0,xshape[0]):
val = np.squeeze((X[i,:] - mu) * sigma * (X[i,:] - mu).T)[0,0]
results.append([str(val)])
e = ErrorDetector(results, modules=[QuantitativeErrorModule], config=[{'thresh': thresh}])
e.fit()
return set([error['cell'][0] for error in e])
def _ocSVM(self, dataset, outliers_fraction=0.2):
types = LoLTypeInference().getDataTypes(dataset)
qdataset = [ [d[i] for i,t in enumerate(types) if t =='numerical' ] for d in dataset]
X = featurize(qdataset, [t for t in types if t =='numerical'])
xshape = np.shape(X)
#for conditioning problems with the estimate
#Xsamp = X + 0.01*np.random.randn(xshape[0],xshape[1])
m = OneClassSVM(nu=0.95 * outliers_fraction + 0.05, kernel="rbf", gamma=0.1)
m.fit(X)
#print [a for a in np.argwhere(m.predict(X)==-1)]
return set([a[0] for a in np.argwhere(m.predict(X)==-1)])
def _isoForest(self, dataset, outliers_fraction=0.2):
types = LoLTypeInference().getDataTypes(dataset)
qdataset = [ [d[i] for i,t in enumerate(types) if t =='numerical' ] for d in dataset]
X = featurize(qdataset, [t for t in types if t =='numerical'])
xshape = np.shape(X)
#for conditioning problems with the estimate
#Xsamp = X + 0.01*np.random.randn(xshape[0],xshape[1])
m = IsolationForest(max_samples= 200, contamination=outliers_fraction)
m.fit(X)
#print [a for a in np.argwhere(m.predict(X)==-1)]
return set([a[0] for a in np.argwhere(m.predict(X)==-1)])
def getResults(self):
dataset = self.getDataset()
results = {}
gt = self._groundTruth(dataset)
results['CQuantitative'] = self.runExperiment(dataset, self._quantitative, gt)
#results['NMCD'] = self.runExperiment(dataset, self._naiveMCD, gt)
#results['OCSVM'] = self.runExperiment(dataset, self._ocSVM, gt)
results['AD'] = self.runExperiment(dataset, self._ad, gt)
results['Missing'] = self.runExperiment(dataset, self._missing, gt)
results['ISOF'] = self.runExperiment(dataset, self._isoForest, gt)
return results
def runExperiment(self, dataset, technique, gt):
start = datetime.datetime.now()
result = technique(dataset)
accuracy = self.pr(gt, result)
runtime = datetime.datetime.now() - start
return (runtime.seconds, accuracy[0], accuracy[1])
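# Hypothetical sketch (added for illustration, not part of the original
# benchmark suite): a concrete benchmark only needs to fill in the three
# hooks that raise NotImplementedError above; the dataset and ground truth
# below are invented purely to show the expected shapes.
class _ToyBenchmark(Benchmark):
    def getDataset(self):
        # list-of-lists records; row 2 carries an obviously outlying value
        return [["1.0", "a"], ["2.0", "b"], ["9000.0", "c"]]
    def getQuantitativeConfig(self):
        return [{'thresh': 3}]
    def _groundTruth(self, dataset):
        return set([2])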
|
mit
|
idbedead/RNA-sequence-tools
|
qpcr.py
|
2
|
4407
|
import pandas as pd
from pprint import pprint
import itertools
import numpy as np
import os
import seaborn as sns
import matplotlib.pyplot as plt
qpcr_file ='/Users/idriver/Downloads/2016-04-19_ITGA8_1_6_7_8.txt'
qpcr_df = pd.read_csv(qpcr_file, sep='\t', index_col=0, parse_dates=True)  # DataFrame.from_csv was removed from pandas
samples = list(set(qpcr_df['Sample Name']))
targets = list(set(qpcr_df['Target Name']))
sample_dict = {}
index_list = []
for samp in samples:
target_dict = {}
delta_ct_actb_dict = {}
delta_ct_gapdh_dict = {}
sample_df = qpcr_df[qpcr_df['Sample Name'] == samp]
for targ in targets:
target_df = sample_df[(sample_df['Target Name'] == targ)]
        targ_mean = pd.to_numeric(target_df['CT'], errors='coerce').mean()  # convert_objects was removed from pandas
target_dict[targ] = targ_mean
index_list.append(samp)
for targ2 in targets:
actb_mean = target_dict['ACTB']
gapdh_mean = target_dict['GAPDH']
if targ2 != 'ACTB':
delta_ct_actb = actb_mean - target_dict[targ2]
else:
delta_ct_actb = 0
if targ2 != 'GAPDH':
delta_ct_gapdh = gapdh_mean - target_dict[targ2]
else:
delta_ct_gapdh = 0
delta_ct_actb_dict[targ2] = delta_ct_actb
delta_ct_gapdh_dict[targ2] = delta_ct_gapdh
sample_dict[samp] = target_dict
sample_dict['delta_ct_actb_'+samp] = delta_ct_actb_dict
sample_dict['delta_ct_gapdh_'+samp] = delta_ct_gapdh_dict
delta_pairs = []
for samp1,samp2 in itertools.permutations(samples,2):
if samp1 != samp2 and samp1[-2:] == samp2[-2:] and 'No' in samp2:
delta_pairs.append((samp1,samp2))
if samp1 != samp2 and samp1[-2:] == samp2[-2:] and 'neg' in samp2:
delta_pairs.append((samp1,samp2))
results_df = pd.DataFrame.from_dict(sample_dict)
fc_results_df = 0
for p in delta_pairs:
pow_dict = dict(zip(targets,[2 for t in targets]))
ratio_dict = {'pos_dict_a':sample_dict['delta_ct_actb_'+p[0]],'neg_dict_a':sample_dict['delta_ct_actb_'+p[1]], 'pos_dict_g':sample_dict['delta_ct_gapdh_'+p[0]], 'neg_dict_g':sample_dict['delta_ct_gapdh_'+p[1]], 'pwer':pow_dict}
pair_df = pd.DataFrame.from_dict(ratio_dict)
pwer_df = pair_df['pwer']
ratio_df_a = pd.DataFrame(pwer_df.pow(pair_df['pos_dict_a'])/pwer_df.pow(pair_df['neg_dict_a']), columns=[p[0]+'_to_'+p[1]+'_ratio_actb'])
ratio_df_g = pd.DataFrame(pwer_df.pow(pair_df['pos_dict_g'])/pwer_df.pow(pair_df['neg_dict_g']), columns=[p[0]+'_to_'+p[1]+'_ratio_gapdh'])
fc_all = pd.merge(ratio_df_a,ratio_df_g, right_index=True, left_index=True)
all_results = pd.merge(results_df,fc_all, right_index=True, left_index=True)
results_df = all_results.copy()
if not isinstance(fc_results_df, pd.DataFrame):
all_fc_results_df = fc_all.copy()
else:
all_fc_results_df = pd.merge(fc_results_df,fc_all, right_index=True, left_index=True)
fc_results_df = all_fc_results_df.copy()
plot_df_dict = {}
target_list = all_fc_results_df.index.tolist()
plot_df_dict['Target'] = []
plot_df_dict['Ratio ITGA8+ to ITGA8-'] = []
plot_df_dict['Control'] = []
for t in all_fc_results_df.columns.values:
control_name = t.split('_')[-1]
plot_df_dict['Target']= plot_df_dict['Target']+ target_list
plot_df_dict['Ratio ITGA8+ to ITGA8-']= plot_df_dict['Ratio ITGA8+ to ITGA8-'] +all_fc_results_df[t].tolist()
plot_df_dict['Control'] = plot_df_dict['Control']+[control_name]*len(target_list)
print(plot_df_dict)
plot_df = pd.DataFrame.from_dict(plot_df_dict)
print(plot_df)
bp = sns.boxplot(x='Target', y='Ratio ITGA8+ to ITGA8-', hue = 'Control', data=plot_df)
add_on =0
ymax = plot_df['Ratio ITGA8+ to ITGA8-'].max()
pos = np.arange(len(set(plot_df['Target'])))
print(pos)
for tick, label in zip(range(len(set(plot_df['Target']))), plot_df['Target']):
print(tick, label)
df_1 = plot_df[(plot_df['Target']==label)&(plot_df['Control']=='actb')]
df_2 = plot_df[(plot_df['Target']==label)&(plot_df['Control']=='gapdh')]
ratio_a = df_1['Ratio ITGA8+ to ITGA8-'].mean()
ratio_g = df_2['Ratio ITGA8+ to ITGA8-'].mean()
bp.text(pos[tick]-0.15, ymax + 0.1, round(ratio_a),
horizontalalignment='center', color='blue')
bp.text(pos[tick]+0.15, ymax + 0.1, round(ratio_g),
horizontalalignment='center', color='green')
plt.show()
results_df.to_csv(os.path.join(os.path.dirname(qpcr_file), 'qpcr_results_'+qpcr_file.split('/')[-1]), sep='\t')
|
mit
|
RPGOne/scikit-learn
|
sklearn/neighbors/unsupervised.py
|
11
|
4757
|
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
|
bsd-3-clause
|
wzbozon/scikit-learn
|
sklearn/cluster/__init__.py
|
364
|
1228
|
"""
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
|
bsd-3-clause
|
sarahgrogan/scikit-learn
|
examples/calibration/plot_calibration.py
|
225
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of the predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer releases
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
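# --- Editor's hedged addendum (not part of the original example) ---
# The `n_bins` constant defined above suggests a reliability diagram was also
# intended. A minimal sketch using sklearn.calibration.calibration_curve is
# given below; it reuses the variables defined above and only runs if called
# explicitly, e.g. `_reliability_curve_sketch()` in an interactive session.
def _reliability_curve_sketch():
    from sklearn.calibration import calibration_curve
    plt.figure()
    for probs, label in [(prob_pos_clf, 'No calibration'),
                         (prob_pos_isotonic, 'Isotonic'),
                         (prob_pos_sigmoid, 'Sigmoid')]:
        # calibration_curve returns (fraction of positives, mean predicted value)
        frac_pos, mean_pred = calibration_curve(y_test, probs, n_bins=n_bins)
        plt.plot(mean_pred, frac_pos, 's-', label=label)
    plt.plot([0, 1], [0, 1], 'k:', label='Perfectly calibrated')
    plt.xlabel("Mean predicted probability")
    plt.ylabel("Fraction of positives")
    plt.legend(loc="best")
    plt.title("Reliability curves (hedged sketch)")
    plt.show()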
|
bsd-3-clause
|
ilyapatrushev/isimage
|
setup.py
|
1
|
1132
|
from setuptools import setup, find_packages
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name = 'isimage'
, version = '0.1.2'
, packages = find_packages()
, entry_points = {'console_scripts':['analyse_image = isimage.analyse_image:main'
, 'select_images = isimage.select_images:main'
]}
, author = 'Ilya Patrushev'
, author_email = '[email protected]'
, description = 'A module and scripts for analysis of whole-mount in-situ hybridisation images.'
, long_description = read('README')
, license = 'GPL v2.0'
, package_data = {'isimage.analyse_image':['test_images/*'], 'isimage.select_images':['*.dump', '*.txt', 'test_images/*.*', 'test_images/*/*.*']}
, url = 'https://github.com/ilyapatrushev/isimage'
, install_requires = ['Pillow>=2.9.0'
, 'matplotlib>=1.4.3'
, 'numpy>=1.9.2'
, 'pandas>=0.16.0'
, 'scikit-learn>=0.15.2'
, 'scipy>=0.15.1'
, 'python-pptx>=0.5.7'
, 'pbr>=0.11'
, 'six>=1.7'
, 'funcsigs>=0.4'
]
)
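# Editor's hedged addendum (illustrative only, not used by setup()): once the
# package is installed, the console-script entry points declared above can be
# introspected programmatically, for example:
def _list_isimage_entry_points():
    import pkg_resources
    return [ep for ep in pkg_resources.iter_entry_points('console_scripts')
            if ep.module_name.startswith('isimage')]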
|
gpl-2.0
|
GuessWhoSamFoo/pandas
|
asv_bench/benchmarks/indexing.py
|
5
|
10167
|
import warnings
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, DataFrame, Panel, MultiIndex,
Int64Index, UInt64Index, Float64Index,
IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
class NumericSeriesIndexing(object):
params = [
(Int64Index, UInt64Index, Float64Index),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indices = {
'unique_monotonic_inc': index(range(N)),
'nonunique_monotonic_inc': index(
list(range(55)) + [54] + list(range(55, N - 1))),
}
self.data = Series(np.random.rand(N), index=indices[index_structure])
self.array = np.arange(10000)
self.array_list = self.array.tolist()
def time_getitem_scalar(self, index, index_structure):
self.data[800000]
def time_getitem_slice(self, index, index_structure):
self.data[:800000]
def time_getitem_list_like(self, index, index_structure):
self.data[[800000]]
def time_getitem_array(self, index, index_structure):
self.data[self.array]
def time_getitem_lists(self, index, index_structure):
self.data[self.array_list]
def time_iloc_array(self, index, index_structure):
self.data.iloc[self.array]
def time_iloc_list_like(self, index, index_structure):
self.data.iloc[[800000]]
def time_iloc_scalar(self, index, index_structure):
self.data.iloc[800000]
def time_iloc_slice(self, index, index_structure):
self.data.iloc[:800000]
def time_ix_array(self, index, index_structure):
self.data.ix[self.array]
def time_ix_list_like(self, index, index_structure):
self.data.ix[[800000]]
def time_ix_scalar(self, index, index_structure):
self.data.ix[800000]
def time_ix_slice(self, index, index_structure):
self.data.ix[:800000]
def time_loc_array(self, index, index_structure):
self.data.loc[self.array]
def time_loc_list_like(self, index, index_structure):
self.data.loc[[800000]]
def time_loc_scalar(self, index, index_structure):
self.data.loc[800000]
def time_loc_slice(self, index, index_structure):
self.data.loc[:800000]
class NonNumericSeriesIndexing(object):
params = [
('string', 'datetime'),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indexes = {'string': tm.makeStringIndex(N),
'datetime': date_range('1900', periods=N, freq='s')}
index = indexes[index]
if index_structure == 'nonunique_monotonic_inc':
index = index.insert(item=index[2], loc=2)[:-1]
self.s = Series(np.random.rand(N), index=index)
self.lbl = index[80000]
def time_getitem_label_slice(self, index, index_structure):
self.s[:self.lbl]
def time_getitem_pos_slice(self, index, index_structure):
self.s[:80000]
def time_get_value(self, index, index_structure):
with warnings.catch_warnings(record=True):
self.s.get_value(self.lbl)
def time_getitem_scalar(self, index, index_structure):
self.s[self.lbl]
def time_getitem_list_like(self, index, index_structure):
self.s[[self.lbl]]
class DataFrameStringIndexing(object):
def setup(self):
index = tm.makeStringIndex(1000)
columns = tm.makeStringIndex(30)
self.df = DataFrame(np.random.randn(1000, 30), index=index,
columns=columns)
self.idx_scalar = index[100]
self.col_scalar = columns[10]
self.bool_indexer = self.df[self.col_scalar] > 0
self.bool_obj_indexer = self.bool_indexer.astype(object)
def time_get_value(self):
with warnings.catch_warnings(record=True):
self.df.get_value(self.idx_scalar, self.col_scalar)
def time_ix(self):
self.df.ix[self.idx_scalar, self.col_scalar]
def time_loc(self):
self.df.loc[self.idx_scalar, self.col_scalar]
def time_getitem_scalar(self):
self.df[self.col_scalar][self.idx_scalar]
def time_boolean_rows(self):
self.df[self.bool_indexer]
def time_boolean_rows_object(self):
self.df[self.bool_obj_indexer]
class DataFrameNumericIndexing(object):
def setup(self):
self.idx_dupe = np.array(range(30)) * 99
self.df = DataFrame(np.random.randn(10000, 5))
self.df_dup = concat([self.df, 2 * self.df, 3 * self.df])
self.bool_indexer = [True] * 5000 + [False] * 5000
def time_iloc_dups(self):
self.df_dup.iloc[self.idx_dupe]
def time_loc_dups(self):
self.df_dup.loc[self.idx_dupe]
def time_iloc(self):
self.df.iloc[:100, 0]
def time_loc(self):
self.df.loc[:100, 0]
def time_bool_indexer(self):
self.df[self.bool_indexer]
class Take(object):
params = ['int', 'datetime']
param_names = ['index']
def setup(self, index):
N = 100000
indexes = {'int': Int64Index(np.arange(N)),
'datetime': date_range('2011-01-01', freq='S', periods=N)}
index = indexes[index]
self.s = Series(np.random.rand(N), index=index)
self.indexer = [True, False, True, True, False] * 20000
def time_take(self, index):
self.s.take(self.indexer)
class MultiIndexing(object):
def setup(self):
mi = MultiIndex.from_product([range(1000), range(1000)])
self.s = Series(np.random.randn(1000000), index=mi)
self.df = DataFrame(self.s)
n = 100000
self.mdt = DataFrame({'A': np.random.choice(range(10000, 45000, 1000),
n),
'B': np.random.choice(range(10, 400), n),
'C': np.random.choice(range(1, 150), n),
'D': np.random.choice(range(10000, 45000), n),
'x': np.random.choice(range(400), n),
'y': np.random.choice(range(25), n)})
self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000]
self.mdt = self.mdt.set_index(['A', 'B', 'C', 'D']).sort_index()
def time_series_ix(self):
self.s.ix[999]
def time_frame_ix(self):
self.df.ix[999]
def time_index_slice(self):
self.mdt.loc[self.idx, :]
class IntervalIndexing(object):
def setup_cache(self):
idx = IntervalIndex.from_breaks(np.arange(1000001))
monotonic = Series(np.arange(1000000), index=idx)
return monotonic
def time_getitem_scalar(self, monotonic):
monotonic[80000]
def time_loc_scalar(self, monotonic):
monotonic.loc[80000]
def time_getitem_list(self, monotonic):
monotonic[80000:]
def time_loc_list(self, monotonic):
monotonic.loc[80000:]
class CategoricalIndexIndexing(object):
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**5
values = list('a' * N + 'b' * N + 'c' * N)
indices = {
'monotonic_incr': CategoricalIndex(values),
'monotonic_decr': CategoricalIndex(reversed(values)),
'non_monotonic': CategoricalIndex(list('abc' * N))}
self.data = indices[index]
self.int_scalar = 10000
self.int_list = list(range(10000))
self.cat_scalar = 'b'
self.cat_list = ['a', 'c']
def time_getitem_scalar(self, index):
self.data[self.int_scalar]
def time_getitem_slice(self, index):
self.data[:self.int_scalar]
def time_getitem_list_like(self, index):
self.data[[self.int_scalar]]
def time_getitem_list(self, index):
self.data[self.int_list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
def time_get_loc_scalar(self, index):
self.data.get_loc(self.cat_scalar)
def time_get_indexer_list(self, index):
self.data.get_indexer(self.cat_list)
class PanelIndexing(object):
def setup(self):
with warnings.catch_warnings(record=True):
self.p = Panel(np.random.randn(100, 100, 100))
self.inds = range(0, 100, 10)
def time_subset(self):
with warnings.catch_warnings(record=True):
self.p.ix[(self.inds, self.inds, self.inds)]
class MethodLookup(object):
def setup_cache(self):
s = Series()
return s
def time_lookup_iloc(self, s):
s.iloc
def time_lookup_ix(self, s):
s.ix
def time_lookup_loc(self, s):
s.loc
class GetItemSingleColumn(object):
def setup(self):
self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=['A'])
self.df_int_col = DataFrame(np.random.randn(3000, 1))
def time_frame_getitem_single_column_label(self):
self.df_string_col['A']
def time_frame_getitem_single_column_int(self):
self.df_int_col[0]
class AssignTimeseriesIndex(object):
def setup(self):
N = 100000
idx = date_range('1/1/2000', periods=N, freq='H')
self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx)
def time_frame_assign_timeseries_index(self):
self.df['date'] = self.df.index
class InsertColumns(object):
def setup(self):
self.N = 10**3
self.df = DataFrame(index=range(self.N))
def time_insert(self):
np.random.seed(1234)
for i in range(100):
self.df.insert(0, i, np.random.randn(self.N),
allow_duplicates=True)
def time_assign_with_setitem(self):
np.random.seed(1234)
for i in range(100):
self.df[i] = np.random.randn(self.N)
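# Editor's hedged addendum (not part of the asv suite): each benchmark class
# can also be exercised by hand, which is a quick way to sanity-check `setup`
# before running the full `asv` machinery. Illustrative helper only.
def _smoke_test_numeric_series_indexing():
    bench = NumericSeriesIndexing()
    bench.setup(Int64Index, 'unique_monotonic_inc')
    bench.time_loc_scalar(Int64Index, 'unique_monotonic_inc')
    return bench.data.loc[800000]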
from .pandas_vb_common import setup # noqa: F401
|
bsd-3-clause
|
paultopia/auto-sklearn
|
setup.py
|
5
|
4953
|
import setuptools
from setuptools.command.install import install
from distutils.extension import Extension
import os
import shutil
import subprocess
import sys
import tarfile
import urllib
SMAC_DOWNLOAD_LOCATION = "http://aad.informatik.uni-freiburg.de/~feurerm/"
SMAC_TAR_NAME = "smac-v2.08.01-development-1.tar.gz"
#METADATA_LOCATION = "http://aad.informatik.uni-freiburg.de/~feurerm/"
#METADATA_TAR_NAME = "metadata_automl1_000.tar.gz"
RUNSOLVER_LOCATION = "http://www.cril.univ-artois.fr/~roussel/runsolver/"
RUNSOLVER_TAR_NAME = "runsolver-3.3.4.tar.bz2"
DOWNLOAD_DIRECTORY = os.path.join(os.path.dirname(__file__), ".downloads")
BINARIES_DIRECTORY = "autosklearn/binaries"
METADATA_DIRECTORY = "autosklearn/metalearning/files"
class Download(install):
def run(self):
try:
shutil.rmtree(DOWNLOAD_DIRECTORY)
except:
pass
try:
os.makedirs(DOWNLOAD_DIRECTORY)
except:
pass
for download_url, filename in [(SMAC_DOWNLOAD_LOCATION, SMAC_TAR_NAME),
#(METADATA_LOCATION, METADATA_TAR_NAME),
(RUNSOLVER_LOCATION, RUNSOLVER_TAR_NAME)]:
            # This can fail ungracefully, because having these files is
            # crucial to AutoSklearn! (A hedged retry sketch is defined after
            # this class, but it is not wired in here.)
urllib.urlretrieve(os.path.join(download_url, filename),
filename=os.path.join(DOWNLOAD_DIRECTORY, filename))
tfile = tarfile.open(os.path.join(DOWNLOAD_DIRECTORY, filename))
tfile.extractall(os.path.join(DOWNLOAD_DIRECTORY,
filename.replace(".tar.gz", "").
replace(".tar.bz2", "")))
# Build the runsolver
sys.stdout.write("Building runsolver\n")
cur_pwd = os.getcwd()
runsolver_source_path = os.path.join(DOWNLOAD_DIRECTORY,
"runsolver-3.3.4",
"runsolver", "src")
os.chdir(runsolver_source_path)
subprocess.check_call("make")
os.chdir(cur_pwd)
# Create a fresh binaries directory
try:
shutil.rmtree(BINARIES_DIRECTORY)
except:
pass
try:
os.makedirs(BINARIES_DIRECTORY)
            # open with 'w' so that an empty __init__.py is actually created
            with open(os.path.join(BINARIES_DIRECTORY, '__init__.py'), 'w'):
pass
except:
pass
# Copy the runsolver into the sources so it gets copied
shutil.move(os.path.join(runsolver_source_path, "runsolver"),
os.path.join(BINARIES_DIRECTORY, "runsolver"))
# Copy SMAC
shutil.move(os.path.join(DOWNLOAD_DIRECTORY,
SMAC_TAR_NAME.replace(".tar.gz", "")),
BINARIES_DIRECTORY)
#try:
# shutil.rmtree(METADATA_DIRECTORY)
#except:
# pass
# Copy the metadata
#shutil.move(os.path.join(DOWNLOAD_DIRECTORY,
# METADATA_TAR_NAME.replace(".tar.gz", ""),
# "files"),
# METADATA_DIRECTORY)
        # TODO: Normally one would call run(self), but that runs distutils and
        # ignores install_requires for unknown reasons; if anyone knows a
        # better way, feel free to change it.
install.do_egg_install(self)
#shutil.rmtree(os.path.join(METADATA_DIRECTORY))
shutil.rmtree(BINARIES_DIRECTORY)
shutil.rmtree(DOWNLOAD_DIRECTORY)
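# Editor's hedged sketch (illustrative only, not wired into Download.run()):
# the download loop above notes that it "can fail ungracefully"; a defensive
# wrapper along these lines could retry transient failures instead.
def _download_with_retries(url, target, attempts=3):
    import time
    for attempt in range(1, attempts + 1):
        try:
            urllib.urlretrieve(url, filename=target)
            return
        except IOError:
            if attempt == attempts:
                raise
            time.sleep(2 ** attempt)  # simple exponential backoff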
setuptools.setup(name="AutoSklearn",
description="Code to participate in the AutoML 2015 challenge.",
version="0.0.1dev",
ext_modules=[Extension("autosklearn.data.competition_c_functions",
["autosklearn/data/competition_c_functions.c"])],
packages=setuptools.find_packages(exclude=['test']),
install_requires=["numpy",
"psutil",
"pyyaml",
"scipy",
"scikit-learn==0.15.2",
"nose",
"lockfile",
"HPOlibConfigSpace",
"ParamSklearn",
"pymetalearn",
"cma"],
test_suite="nose.collector",
cmdclass={'install': Download},
scripts=["scripts/autosklearn"],
include_package_data=True,
author="Matthias Feurer",
author_email="[email protected]",
license="BSD",
platforms=['Linux'],
classifiers=[],
url='www.automl.org')
|
bsd-3-clause
|
yonglehou/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
132
|
25860
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is not
    # supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works correctly even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio, as a
    # result all the centers should be reassigned and the model should no
    # longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
    # is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    # Check that k_means with a bad initialization does not yield a singleton.
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
    # Check that increasing the number of initializations (n_init) increases
    # the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
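# Editor's hedged addendum (not part of the test suite): a minimal usage
# sketch tying the module-level fixtures together; it mirrors what the tests
# above assert and is convenient for interactive exploration.
def _example_kmeans_quickstart():
    km = KMeans(n_clusters=n_clusters, random_state=0).fit(X)
    print("inertia: %f" % km.inertia_)
    print("v-measure vs. true labels: %f"
          % v_measure_score(true_labels, km.labels_))
    return km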
|
bsd-3-clause
|
nicolargo/intellij-community
|
python/helpers/pydev/pydev_ipython/inputhook.py
|
52
|
18411
|
# coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
        # In the context of the PyDev console we don't actually set
        # PyOS_InputHook; instead, this code is run while waiting for input
        # over XML-RPC.
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
            If None, clear all app references. If one of ('wx', 'qt4'), clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal-based applications like
        IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal-based applications like
        IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for PyGTK, which allows
        PyGTK to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for GLUT, which allows GLUT to
        integrate with terminal-based applications like IPython. Due to GLUT
        limitations, it is currently not possible to start the event loop
        without first creating a window. You should therefore not create
        another window but instead use the one that has been created. See
        'gui-glut.py' in the docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
        This sets PyOS_InputHook to NULL, sets the display function to a
        dummy one, and sets the timer to a dummy timer that will be triggered
        very far in the future.
"""
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the ``PyOS_InputHook`` for pyglet, which allows
        pyglet to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for Gtk3, which allows
        Gtk3 to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
        We call pyplot.pause, which updates and displays the active figure
        during the pause. It is not MacOSX-specific, but it lets us avoid
        input hooks in the native MacOSX backend.
        We also should not import pyplot until the user does, because the
        backend can only be chosen before pyplot is imported for the first
        time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
|
apache-2.0
|
gef756/statsmodels
|
statsmodels/examples/ex_scatter_ellipse.py
|
39
|
1367
|
'''example for grid of scatter plots with probability ellipses
Author: Josef Perktold
License: BSD-3
'''
from statsmodels.compat.python import lrange
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.plot_grids import scatter_ellipse
nvars = 6
mmean = np.arange(1.,nvars+1)/nvars * 1.5
rho = 0.5
#dcorr = rho*np.ones((nvars, nvars)) + (1-rho)*np.eye(nvars)
r = np.random.uniform(-0.99, 0.99, size=(nvars, nvars))
##from scipy import stats
##r = stats.rdist.rvs(1, size=(nvars, nvars))
r = (r + r.T) / 2.
assert np.allclose(r, r.T)
mcorr = r
mcorr[lrange(nvars), lrange(nvars)] = 1
#dcorr = np.array([[1, 0.5, 0.1],[0.5, 1, -0.2], [0.1, -0.2, 1]])
mstd = np.arange(1.,nvars+1)/nvars
mcov = mcorr * np.outer(mstd, mstd)
evals = np.linalg.eigvalsh(mcov)
assert evals.min() > 0  # assert positive definite
nobs = 100
data = np.random.multivariate_normal(mmean, mcov, size=nobs)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
print(dmean)
print(dcov)
dcorr = np.corrcoef(data, rowvar=0)
dcorr[np.triu_indices(nvars)] = 0
print(dcorr)
#default
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95])
#used for checking
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95], add_titles=True, keep_ticks=True)
#check varnames
varnames = ['var%d' % i for i in range(nvars)]
fig = scatter_ellipse(data, level=0.9, varnames=varnames)
plt.show()
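# Editor's hedged addendum (illustrative only): assuming the usual chi-square
# construction for Gaussian confidence ellipses, the semi-axis lengths of the
# level-0.9 ellipse for the first pair of variables can be checked by hand by
# calling _check_ellipse_axes() interactively.
def _check_ellipse_axes(level=0.9):
    from scipy import stats
    cov01 = dcov[:2, :2]                   # covariance block for (var0, var1)
    radius2 = stats.chi2.ppf(level, df=2)  # squared Mahalanobis radius
    eigvals01 = np.linalg.eigvalsh(cov01)
    return np.sqrt(eigvals01 * radius2)    # semi-axis lengths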
|
bsd-3-clause
|
AyoubBelhadji/random_matrix_factorization
|
setup.py
|
1
|
4025
|
"""A setuptools based setup module.
See:
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='random_matrix_factorization',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.dev',
description='random_matrix_factorization is a Python library for randomized linear algebra.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/AyoubBelhadji/random_matrix_factorization',
# Author details
author='Ayoub Belhadji',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='Random linear algebra',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy','matplotlib'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
mit
|
COHRINT/cops_and_robots
|
src/cops_and_robots/display_tools/display.py
|
1
|
10731
|
#!/usr/bin/env python
"""Manages all display elements (figures, axes, etc.)
"""
import os
import logging
import numpy as np
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from cops_and_robots.fusion.gaussian_mixture import GaussianMixture
from cops_and_robots.display_tools.human_interface import CodebookInterface, ChatInterface
class Display(object):
"""Controls the windows displayed.
Each window holds one figure. Each figure holds multiple axes. The Display
class only creates the window-figure pairs, and assumes that other classes
handle their figures' axes.
"""
window_sets = [{'Main': {'size': (14,10),
'position': (50,0),
},
},
{'Main': {'size':(13,7),
'position': (50,0),
},
'Chat': {'size':(6,2),
'position': (200,700),
'is_QtWidget': True,
},
},
{'Main': {'size':(13,7),
'position': (50,0),
},
'Codebook': {'size':(6,2),
'position': (100,700),
},
}
]
def __init__(self, window_set_id=0, show_vel_interface=False,
codebook_interface_cfg={}, chat_interface_cfg={}):
self._setup_windows(window_set_id)
self.show_vel_interface = show_vel_interface
self.codebook_interface_cfg = codebook_interface_cfg
self.chat_interface_cfg = chat_interface_cfg
def add_map(self, map_):
self.map = map_
def add_vel_states(self, vel_states, vel_space=None):
self.vel_states = vel_states
        if vel_space is None:
self.vel_space = np.linspace(-1, 1, 100)
else:
self.vel_space = vel_space
if vel_states is not None:
self._setup_velocity_axes()
def add_human_interface(self, human_sensor, questioner=None):
if "Chat" in self.windows:
self._setup_chat(human_sensor)
elif "Codebook" in self.windows:
self._setup_codebook(human_sensor)
else:
logging.debug("No human interface windows available.")
return
# Print questions and answers
if questioner is not None:
self.questioner = questioner
def _setup_chat(self, human_sensor):
# fig = self.windows['Chat']['fig']
self.chat = ChatInterface(human_sensor,
**self.chat_interface_cfg)
def _setup_codebook(self, human_sensor):
fig = self.windows['Codebook']['fig']
self.codebook = CodebookInterface(fig, human_sensor,
**self.codebook_interface_cfg)
def _setup_windows(self, window_set_id):
window_set = Display.window_sets[window_set_id]
self.windows = {}
for name, window in window_set.iteritems():
try:
if window['is_QtWidget']:
self.windows[name] = window
continue
except:
logging.debug('Not a QT widget.')
window['fig'] = plt.figure(figsize=window['size'])
window['fig'].canvas.set_window_title(name)
self.windows[name] = window
# Position windows assuming Qt4 backend
mngr = plt.get_current_fig_manager()
geom = mngr.window.geometry()
x,y,w,h = geom.getRect()
new_x, new_y = window['position']
mngr.window.setGeometry(new_x, new_y, w, h)
def _setup_velocity_axes(self):
self.vel_axes = {}
for name, ax in self.map.axes.iteritems():
#<>TODO: fix for multiple figures!
# Create colormap Gaussian axes
w = 0.02
b = ax.get_position()
x_box = [None] * 4; y_box = [None] * 4 # left, bottom, width, height
x_box[0] = b.x0
x_box[1] = b.y1 * 0.9
x_box[2] = b.width
x_box[3] = w
y_box[0] = b.x1 * 1.02
y_box[1] = b.x0 * 1.743
y_box[2] = w
y_box[3] = b.height * 0.705
vx_ax = self.windows['Main'].add_axes(x_box)
vy_ax = self.windows['Main'].add_axes(y_box)
# Place ticks and labels
vx_ax.xaxis.tick_top()
vx_ax.set_xlabel('x velocity (m/timestep)')
vx_ax.xaxis.set_label_position('top')
vx_ax.get_yaxis().set_visible(False)
vy_ax.yaxis.tick_right()
vy_ax.set_ylabel('y velocity (m/timestep)')
vy_ax.yaxis.set_label_position('right')
vy_ax.get_xaxis().set_visible(False)
# # Create 1d Gaussian axes
# w = 0.06
# b = ax.get_position()
# x_box = [None] * 4; y_box = [None] * 4 # left, bottom, width, height
# x_box[0] = b.x0
# x_box[1] = b.y1 * 0.868
# x_box[2] = b.width
# x_box[3] = w
# y_box[0] = b.x1
# y_box[1] = b.x0 * 1.743
# y_box[2] = w
# y_box[3] = b.height * 0.705
# vx_ax = self.windows['Main'].add_axes(x_box)
# vy_ax = self.windows['Main'].add_axes(y_box)
# # Place important axes
# vx_ax.xaxis.tick_top()
# vx_ax.set_xlabel('x velocity (m/s)')
# vx_ax.xaxis.set_label_position('top')
# vy_ax.yaxis.tick_right()
# vy_ax.set_ylabel('y velocity (m/s)')
# vy_ax.yaxis.set_label_position('right')
# # Hide other axes
# vx_ax.get_yaxis().set_visible(False)
# vx_ax.set_axis_bgcolor([0,0,0,0])
# vx_ax.spines['bottom'].set_visible(False)
# vx_ax.spines['left'].set_visible(False)
# vx_ax.spines['right'].set_visible(False)
# vy_ax.get_xaxis().set_visible(False)
# vy_ax.set_axis_bgcolor([0,0,0,0])
# vy_ax.spines['bottom'].set_visible(False)
# vy_ax.spines['left'].set_visible(False)
# vy_ax.spines['top'].set_visible(False)
self.vel_axes[name] = {'vx': vx_ax,
'vy': vy_ax,
}
def remove_velocity(self):
for _, axes in self.vel_axes.iteritems():
vx_ax = axes['vx']
vy_ax = axes['vy']
try:
vx_contourf = axes['vx_contourf']
vy_contourf = axes['vy_contourf']
except:
logging.error('Exception!')
# # Remove lines
# try:
# for line in vx_ax.lines:
# line.remove()
# for line in vy_ax.lines:
# line.remove()
# except:
# logging.error('Exception!')
# Remove fill
try:
for coll in vx_contourf.collections:
coll.remove()
del vx_contourf
for coll in vy_contourf.collections:
coll.remove()
del vy_contourf
except:
logging.error('Exception!')
def update_velocity(self, i):
# <>TEST STUB
if np.random.random() < 0.9 and i > 1:
return
if i > 1:
self.remove_velocity()
for name, axes in self.vel_axes.iteritems():
vx_ax = axes['vx']
vy_ax = axes['vy']
v = self.vel_space
vx = self.vel_states[name].probability.marginal_pdf(x=v, axis=0)
vy = self.vel_states[name].probability.marginal_pdf(x=v, axis=1)
# # TEST STUB
# vx = GaussianMixture(np.random.random(2), (0.5 - np.random.random(2)), np.random.random(2)).pdf(v)
# vy = GaussianMixture(np.random.random(2), (0.5 - np.random.random(2)), np.random.random(2)).pdf(v)
levels = np.linspace(0, np.max((vx, vy)), 80)
vx = np.tile(vx, (2,1))
vy = np.tile(vy, (2,1))
alpha = 0.8
vx_contourf = vx_ax.contourf(v, [0,1], vx, cmap='viridis',
levels=levels, alpha=alpha,
antialiased=True, lw=0)
vy_contourf = vy_ax.contourf([0,1], v, vy.T, cmap='viridis',
levels=levels, alpha=alpha,
antialiased=True, lw=0)
self.vel_axes[name]['vx_contourf'] = vx_contourf
self.vel_axes[name]['vy_contourf'] = vy_contourf
# # This is the fix for the white lines between contour levels
# for c in vx_contourf.collections:
# c.set_edgecolor("face")
# c.set_alpha(alpha)
# for c in vy_contourf.collections:
# c.set_edgecolor("face")
# c.set_alpha(alpha)
# vx_ax.plot(v, vx, lw=2, color='g')
# vx_ax.fill_between(v, 0, vx, color='g', alpha=0.2)
# vy_ax.plot(vy, v, lw=2, color='g')
# vy_ax.fill_betweenx(v, 0, vy, color='g', alpha=0.2)
def update_question_answer(self):
if hasattr(self.questioner, 'recent_answer'):
str_ = self.questioner.recent_question + ' ' \
+ self.questioner.recent_answer
if hasattr(self, 'q_text'):
self.q_text.remove()
bbox = {'facecolor': 'white',
'alpha': 0.8,
'boxstyle':'round',
}
ax = self.windows['Main']['fig'].get_axes()[0]
self.q_text = ax.annotate(str_, xy=(-5, -4.5),
xycoords='data', annotation_clip=False,
fontsize=16, bbox=bbox)
def update(self, i):
try:
self.map.update(i)
except AttributeError, e:
logging.exception("Map not yet defined!")
raise e
if hasattr(self, 'vel_states'):
self.update_velocity(i)
if hasattr(self, 'questioner'):
self.update_question_answer()
if __name__ == '__main__':
from cops_and_robots.map_tools.map import Map
display = Display(window_set_id=0)
map_ = Map()
map_.setup_plot(fig=display.windows['Main']['fig'])
display.add_map(map_)
plt.show()
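    # A hedged variation on the demo above (not part of the original module):
    # passing window_set_id=2 to Display() would also create the 'Codebook'
    # window defined in Display.window_sets, available afterwards as
    # display.windows['Codebook']['fig'] for use with a CodebookInterface.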
|
apache-2.0
|
plorch/FocusOnWildlifeCMPScripts
|
aggregate_cleveland_survey.py
|
1
|
5774
|
import sys, os, glob
import pandas as pd, numpy as np
import ujson
import datetime
from ast import literal_eval
from get_workflow_info import get_workflow_info, translate_non_alphanumerics, get_short_slug
from aggregate_question_utils import breakout_anno_survey, getfrac, aggregate_survey, write_class_row
classfile = 'focus-on-wildlife-cleveland-metroparks-classifications.csv'
workflow_file = 'focus-on-wildlife-cleveland-metroparks-workflows.csv'
workflow_cfile = 'focus-on-wildlife-cleveland-metroparks-workflow_contents.csv'
workflow_id = 1432
workflow_version = "478.99"
try:
classfile = sys.argv[1]
except:
print("Usage:\n%s classfile\n example classifications export file: %s\n" % (sys.argv[0], classfile))
print("Optional inputs:")
print(" workflows=projectname-workflows.csv (export from project builder)")
print(" workflow_contents=projectname-workflow_contents.csv (export from project builder)")
print(" workflow_id=N")
print(" workflow_version=N (looks like a number with format: major.minor)")
print(" outfile_class=filename.csv\n file to save exploded classifications with 1 annotation per row")
print(" outfile_agg=filename.csv\n file to save aggregated classifications")
print(" If you don't specify an outfile_class or outfile_agg, the filenames\n will be based on the input classfile name.")
print(" If you vary the project from the suggested one above, you'll need to specify workflow files.\n")
exit(0)
annofile = classfile.replace('.csv', '_annotations_1lineeach.csv')
outfile = classfile.replace('.csv', '_aggregated.csv')
outfile_huge = classfile.replace('.csv', '_aggregated_kitchensink.csv')
# check for other command-line arguments
if len(sys.argv) > 2:
# if there are additional arguments, loop through them
for i_arg, argstr in enumerate(sys.argv[2:]):
arg = argstr.split('=')
if arg[0] == "workflow_id":
workflow_id = int(arg[1])
elif arg[0] == "workflow_version":
workflow_version = float(arg[1])
elif (arg[0] == "outfile_class"):
annofile = arg[1]
elif (arg[0] == "outfile_agg") | (arg[0] == "outfile"):
outfile = arg[1]
elif (arg[0] == "workflows"):
workflow_file = arg[1]
elif (arg[0] == "workflow_contents"):
workflow_cfile = arg[1]
elif (arg[0] == "class_in") | (arg[0] == "classfile") | (arg[0] == "in_class"):
infile = arg[1]
elif arg[0] == "classfile_breakout":
breakout_class = False
classfile_breakout = arg[1]
# make sure you don't overwrite even if the input file doesn't end in .csv
if classfile == annofile:
annofile = classfile + '_annotations_1lineeach.csv'
if outfile == classfile:
outfile = classfile + '_aggregated.csv'
workflow_df = pd.read_csv(workflow_file)
workflow_cdf = pd.read_csv(workflow_cfile)
workflow_info = get_workflow_info(workflow_df, workflow_cdf, workflow_id, workflow_version)
classifications = pd.read_csv(classfile, low_memory=False)
classifications['anno_json'] = [ujson.loads(q) for q in classifications.annotations]
classifications.fillna(0.0, inplace=True)
# now that we have the workflow information, we need to get the mark columns we will print
# we need both the survey ID columns and the "nothing here" etc. columns
thecols = []
for task in workflow_info['tasknames']:
if workflow_info[task]['type'] == 'survey':
# first, a column for what species was selected
thecols.append(task.lower()+'_choice')
# then columns for each request for additional sub-classification
for question in workflow_info[task]['questionsOrder']:
thecols.append("%s_%s" % (task.lower(), workflow_info[task]['questions'][question]['label_slug']))
elif workflow_info[task]['type'] == 'shortcut':
# each possible shortcut "answer" is a tickmark, i.e. True or False
# so 1 col each
for answer in workflow_info[task]['answers']:
thecols.append(answer['label_slug'])
classcols = "classification_id subject_ids created_at user_name user_id user_ip".split()
printcols = classcols + thecols
theheader = {}
for i in range(len(printcols)):
theheader[printcols[i]] = printcols[i]
# open the file
fp = open(annofile, "w")
# write the CSV header
write_class_row(fp, theheader, printcols)
# breakout and write each mark to the file
n_marks = classifications.apply(lambda row: breakout_anno_survey(row, workflow_info, fp, classcols, thecols), axis=1)
# done writing the file
fp.close()
print("%d annotations jailbroken from %d classifications, written to %s as individual marks..." % (sum(n_marks), len(classifications), annofile))
# save the number of marks per classification, in case it ends up being useful
classifications['n_marks'] = n_marks
# now re-read the csv file with the annotations
annotations = pd.read_csv(annofile)
annotations['count'] = np.ones_like(annotations.created_at)
# we need to group by subject in order to aggregate
by_subj = annotations.groupby('subject_ids')
class_agg = by_subj.apply(aggregate_survey, workflow_info=workflow_info)
# check for empty columns
all_cols = class_agg.columns.values
use_cols = (class_agg.columns.values).tolist()
for thecol in all_cols:
if sum(class_agg[thecol]) == 0.0:
use_cols.remove(thecol)
# write both the kitchen sink version and the version with no totally empty columns
class_agg.to_csv(outfile_huge)
class_agg[use_cols].to_csv(outfile)
print("\nAggregated classifications written to %s \n (kitchen sink version: %s )\n" % (outfile, outfile_huge))
# to do:
# print a summary file, one line per subject, with all species idents for that subject
#end
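# Example invocation (a sketch based on the usage message above; the
# classification-export filename is the default assumed at the top of this
# script and will differ for other projects):
#   python aggregate_cleveland_survey.py \
#       focus-on-wildlife-cleveland-metroparks-classifications.csv \
#       workflow_id=1432 workflow_version=478.99 outfile_agg=aggregated.csv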
|
gpl-2.0
|
yousrabk/mne-python
|
examples/visualization/plot_channel_epochs_image.py
|
8
|
2653
|
"""
=========================================
Visualize channel over epochs as an image
=========================================
This will produce what is sometimes called an event-related
potential/field (ERP/ERF) image.
Two images are produced: one with a good channel and one with a channel
that does not see any evoked field.
It is also demonstrated how to reorder the epochs using a 1d spectral
embedding as described in:
Graph-based variability estimation in single-trial event-related neural
responses A. Gramfort, R. Keriven, M. Clerc, 2010,
Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
https://hal.inria.fr/inria-00497023
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event related fields images
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.cluster.spectral import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=0.5, vmin=-100,
vmax=250, colorbar=True, order=order_func, show=True)
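# A hedged alternative to the spectral reordering above: if scikit-learn is
# not installed, the same channels can be plotted in the original epoch order
# by passing order=None (the default) instead of order_func, e.g.
# mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=0.5,
#                           vmin=-100, vmax=250, colorbar=True, order=None,
#                           show=True)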
|
bsd-3-clause
|
quheng/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al. 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and the real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
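# The four staged_predict loops above can be written more compactly (a hedged
# refactoring, not part of the original example), e.g. for the real-AdaBoost
# test error:
# ada_real_err = np.array([zero_one_loss(y_pred, y_test)
#                          for y_pred in ada_real.staged_predict(X_test)])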
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
|
bsd-3-clause
|
thunderhoser/GewitterGefahr
|
gewittergefahr/gg_utils/linkage.py
|
1
|
114840
|
"""Links hazardous events to storm cells.
Currently the only "hazardous events" handled by this code are damaging
straight-line wind and tornadoes.
--- DEFINITIONS ---
"Storm cell" = a single thunderstorm (standard meteorological definition). I
will use S to denote a storm cell.
"Storm object" = one thunderstorm at one time step (snapshot of a storm cell).
I will use s to denote a storm object.
"""
import copy
import pickle
import shutil
import os.path
import warnings
import numpy
import pandas
from gewittergefahr.gg_io import raw_wind_io
from gewittergefahr.gg_io import tornado_io
from gewittergefahr.gg_io import storm_tracking_io as tracking_io
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import interp
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import number_rounding
from gewittergefahr.gg_utils import temporal_tracking
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
LARGE_INTEGER = int(1e10)
LARGE_DISTANCE_METRES = float(1e12)
YEAR_FORMAT = '%Y'
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
WIND_EVENT_STRING = 'wind'
TORNADO_EVENT_STRING = 'tornado'
TORNADOGENESIS_EVENT_STRING = 'tornadogenesis'
VALID_EVENT_TYPE_STRINGS = [
WIND_EVENT_STRING, TORNADO_EVENT_STRING, TORNADOGENESIS_EVENT_STRING
]
DEFAULT_MAX_TIME_BEFORE_STORM_SEC = 300
DEFAULT_MAX_TIME_AFTER_STORM_SEC = 300
DEFAULT_BBOX_PADDING_METRES = 1e5
DEFAULT_MAX_WIND_DISTANCE_METRES = 30000.
DEFAULT_MAX_TORNADO_DISTANCE_METRES = 30000.
REQUIRED_STORM_COLUMNS = [
tracking_utils.PRIMARY_ID_COLUMN, tracking_utils.SECONDARY_ID_COLUMN,
tracking_utils.FULL_ID_COLUMN, tracking_utils.VALID_TIME_COLUMN,
tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN,
tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN,
tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN,
tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN,
tracking_utils.TRACKING_START_TIME_COLUMN,
tracking_utils.TRACKING_END_TIME_COLUMN,
tracking_utils.CELL_START_TIME_COLUMN, tracking_utils.CELL_END_TIME_COLUMN,
tracking_utils.CENTROID_LATITUDE_COLUMN,
tracking_utils.CENTROID_LONGITUDE_COLUMN,
tracking_utils.LATLNG_POLYGON_COLUMN
]
REQUIRED_WIND_COLUMNS = [
raw_wind_io.STATION_ID_COLUMN, raw_wind_io.LATITUDE_COLUMN,
raw_wind_io.LONGITUDE_COLUMN, raw_wind_io.TIME_COLUMN,
raw_wind_io.U_WIND_COLUMN, raw_wind_io.V_WIND_COLUMN
]
STORM_CENTROID_X_COLUMN = 'centroid_x_metres'
STORM_CENTROID_Y_COLUMN = 'centroid_y_metres'
STORM_VERTICES_X_COLUMN = 'vertices_x_metres'
STORM_VERTICES_Y_COLUMN = 'vertices_y_metres'
EVENT_TIME_COLUMN = 'unix_time_sec'
EVENT_LATITUDE_COLUMN = 'latitude_deg'
EVENT_LONGITUDE_COLUMN = 'longitude_deg'
EVENT_X_COLUMN = 'x_coord_metres'
EVENT_Y_COLUMN = 'y_coord_metres'
NEAREST_SECONDARY_ID_COLUMN = 'nearest_secondary_id_string'
NEAREST_TIME_COLUMN = 'nearest_storm_time_unix_sec'
LINKAGE_DISTANCE_COLUMN = 'linkage_distance_metres'
TORNADO_ASSIGNED_COLUMN = 'tornado_assigned_flag'
STORM_VERTEX_X_COLUMN = 'vertex_x_metres'
STORM_VERTEX_Y_COLUMN = 'vertex_y_metres'
LINKAGE_DISTANCES_COLUMN = 'linkage_distances_metres'
RELATIVE_EVENT_TIMES_COLUMN = 'relative_event_times_sec'
EVENT_LATITUDES_COLUMN = 'event_latitudes_deg'
EVENT_LONGITUDES_COLUMN = 'event_longitudes_deg'
MAIN_OBJECT_FLAGS_COLUMN = 'main_object_flags'
MERGING_PRED_FLAG_COLUMN = 'merging_predecessor_flag'
FUJITA_RATINGS_COLUMN = 'f_or_ef_scale_ratings'
TORNADO_IDS_COLUMN = 'tornado_id_strings'
WIND_STATION_IDS_COLUMN = 'wind_station_ids'
U_WINDS_COLUMN = 'u_winds_m_s01'
V_WINDS_COLUMN = 'v_winds_m_s01'
THESE_COLUMNS = [
LINKAGE_DISTANCES_COLUMN, RELATIVE_EVENT_TIMES_COLUMN,
EVENT_LATITUDES_COLUMN, EVENT_LONGITUDES_COLUMN, MAIN_OBJECT_FLAGS_COLUMN,
MERGING_PRED_FLAG_COLUMN
]
WIND_LINKAGE_COLUMNS = THESE_COLUMNS + [
WIND_STATION_IDS_COLUMN, U_WINDS_COLUMN, V_WINDS_COLUMN
]
TORNADO_LINKAGE_COLUMNS = THESE_COLUMNS + [
FUJITA_RATINGS_COLUMN, TORNADO_IDS_COLUMN
]
REQUIRED_WIND_LINKAGE_COLUMNS = REQUIRED_STORM_COLUMNS + WIND_LINKAGE_COLUMNS
REQUIRED_TORNADO_LINKAGE_COLUMNS = (
REQUIRED_STORM_COLUMNS + TORNADO_LINKAGE_COLUMNS
)
EARLY_FLAG_COLUMN = 'in_early_period'
MAX_TIME_BEFORE_START_KEY = 'max_time_before_storm_start_sec'
MAX_TIME_AFTER_END_KEY = 'max_time_after_storm_end_sec'
STORM_INTERP_TIME_KEY = 'storm_interp_time_interval_sec'
BBOX_PADDING_KEY = 'bounding_box_padding_metres'
MAX_LINK_DISTANCE_KEY = 'max_link_distance_metres'
SECONDARY_START_TIME_COLUMN = 'secondary_cell_start_time_unix_sec'
SECONDARY_END_TIME_COLUMN = 'secondary_cell_end_time_unix_sec'
def _check_input_args(
tracking_file_names, max_time_before_storm_start_sec,
max_time_after_storm_end_sec, bounding_box_padding_metres,
storm_interp_time_interval_sec, max_link_distance_metres):
"""Error-checks input arguments.
:param tracking_file_names: 1-D list of paths to storm-tracking files
(readable by `storm_tracking_io.read_file`).
:param max_time_before_storm_start_sec: Max difference between event (E)
time and beginning of storm cell (S). If E occurs more than
`max_time_before_storm_start_sec` before beginning of S, E cannot be
linked to S.
:param max_time_after_storm_end_sec: Max difference between event (E) time
and end of storm cell (S). If E occurs more than
`max_time_after_storm_end_sec` after end of S, E cannot be linked to S.
:param bounding_box_padding_metres: Padding for bounding box around storm
objects. Events outside of this bounding box will be thrown out, which
means that they cannot be linked to storms. The purpose of the bounding
box is to reduce the number of events that must be considered, thus
reducing computing time.
:param storm_interp_time_interval_sec: Discretization time for
interpolation of storm positions. Storms will be interpolated to each
multiple of `storm_interp_time_interval_sec` between the first and
last event times. Setting `storm_interp_time_interval_sec` > 1
reduces computing time, at the cost of a slight decrease in accuracy.
:param max_link_distance_metres: Max linkage distance. If event E is >
`max_link_distance_metres` from the edge of the nearest storm, it will
not be linked to any storm.
"""
error_checking.assert_is_string_list(tracking_file_names)
error_checking.assert_is_numpy_array(
numpy.array(tracking_file_names), num_dimensions=1)
error_checking.assert_is_integer(max_time_before_storm_start_sec)
error_checking.assert_is_geq(max_time_before_storm_start_sec, 0)
error_checking.assert_is_integer(max_time_after_storm_end_sec)
error_checking.assert_is_geq(max_time_after_storm_end_sec, 0)
error_checking.assert_is_integer(storm_interp_time_interval_sec)
error_checking.assert_is_greater(storm_interp_time_interval_sec, 0)
error_checking.assert_is_geq(max_link_distance_metres, 0.)
error_checking.assert_is_geq(
bounding_box_padding_metres, max_link_distance_metres)
return {
MAX_TIME_BEFORE_START_KEY: max_time_before_storm_start_sec,
MAX_TIME_AFTER_END_KEY: max_time_after_storm_end_sec,
STORM_INTERP_TIME_KEY: storm_interp_time_interval_sec,
BBOX_PADDING_KEY: bounding_box_padding_metres,
MAX_LINK_DISTANCE_KEY: max_link_distance_metres
}
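# Illustrative call (hypothetical file name; a sketch, not part of the original
# module): the returned dictionary simply echoes the validated arguments under
# the *_KEY constants defined above, e.g.
#
#     metadata_dict = _check_input_args(
#         tracking_file_names=['segmotion_tracks.p'],
#         max_time_before_storm_start_sec=300,
#         max_time_after_storm_end_sec=300,
#         bounding_box_padding_metres=1e5,
#         storm_interp_time_interval_sec=10,
#         max_link_distance_metres=30000.)
#     metadata_dict[MAX_LINK_DISTANCE_KEY]  # -> 30000.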
def _get_bounding_box_for_storms(
storm_object_table, padding_metres=DEFAULT_BBOX_PADDING_METRES):
"""Creates bounding box (with some padding) around all storm objects.
:param storm_object_table: pandas DataFrame created by
`_project_storms_latlng_to_xy`.
:param padding_metres: Padding (will be added to each edge of bounding box).
:return: x_limits_metres: length-2 numpy array with [min, max] x-coordinates
of bounding box.
:return: y_limits_metres: length-2 numpy array with [min, max] y-coordinates
of bounding box.
"""
x_min_metres = numpy.inf
x_max_metres = -numpy.inf
y_min_metres = numpy.inf
y_max_metres = -numpy.inf
num_storms = len(storm_object_table.index)
for i in range(num_storms):
x_min_metres = min([
x_min_metres,
numpy.min(storm_object_table[STORM_VERTICES_X_COLUMN].values[i])
])
x_max_metres = max([
x_max_metres,
numpy.max(storm_object_table[STORM_VERTICES_X_COLUMN].values[i])
])
y_min_metres = min([
y_min_metres,
numpy.min(storm_object_table[STORM_VERTICES_Y_COLUMN].values[i])
])
y_max_metres = max([
y_max_metres,
numpy.max(storm_object_table[STORM_VERTICES_Y_COLUMN].values[i])
])
x_limits_metres = numpy.array([
x_min_metres - padding_metres,
x_max_metres + padding_metres
])
y_limits_metres = numpy.array([
y_min_metres - padding_metres,
y_max_metres + padding_metres
])
return x_limits_metres, y_limits_metres
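# Quick numeric illustration of the padding logic above (hypothetical values):
# if all storm vertices span x in [0, 5000] m and y in [0, 3000] m, the default
# padding of 1e5 m yields x_limits_metres = [-100000, 105000] and
# y_limits_metres = [-100000, 103000].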
def _project_storms_latlng_to_xy(storm_object_table, projection_object):
"""Projects storm positions from lat-long to x-y coordinates.
This method projects both centroids and storm outlines.
V = number of vertices in a given storm outline
:param storm_object_table: pandas DataFrame created by
`_read_input_storm_tracks`.
:param projection_object: Instance of `pyproj.Proj`, defining an equidistant
projection.
:return: storm_object_table: Same as input, but with additional columns
listed below.
storm_object_table.centroid_x_metres: x-coordinate of centroid.
storm_object_table.centroid_y_metres: y-coordinate of centroid.
storm_object_table.vertices_x_metres: length-V numpy array with x-
coordinates of vertices.
storm_object_table.vertices_y_metres: length-V numpy array with y-
coordinates of vertices.
"""
centroids_x_metres, centroids_y_metres = projections.project_latlng_to_xy(
latitudes_deg=storm_object_table[
tracking_utils.CENTROID_LATITUDE_COLUMN].values,
longitudes_deg=storm_object_table[
tracking_utils.CENTROID_LONGITUDE_COLUMN].values,
projection_object=projection_object)
nested_array = storm_object_table[[
tracking_utils.PRIMARY_ID_COLUMN, tracking_utils.PRIMARY_ID_COLUMN
]].values.tolist()
storm_object_table = storm_object_table.assign(**{
STORM_CENTROID_X_COLUMN: centroids_x_metres,
STORM_CENTROID_Y_COLUMN: centroids_y_metres,
STORM_VERTICES_X_COLUMN: nested_array,
STORM_VERTICES_Y_COLUMN: nested_array
})
num_storm_objects = len(storm_object_table.index)
for i in range(num_storm_objects):
this_vertex_dict_latlng = polygons.polygon_object_to_vertex_arrays(
storm_object_table[tracking_utils.LATLNG_POLYGON_COLUMN].values[i]
)
(storm_object_table[STORM_VERTICES_X_COLUMN].values[i],
storm_object_table[STORM_VERTICES_Y_COLUMN].values[i]
) = projections.project_latlng_to_xy(
latitudes_deg=this_vertex_dict_latlng[polygons.EXTERIOR_Y_COLUMN],
longitudes_deg=this_vertex_dict_latlng[polygons.EXTERIOR_X_COLUMN],
projection_object=projection_object)
return storm_object_table
def _project_events_latlng_to_xy(event_table, projection_object):
"""Projects event locations from lat-long to x-y coordinates.
:param event_table: pandas DataFrame with at least the following columns.
event_table.latitude_deg: Latitude (deg N).
event_table.longitude_deg: Longitude (deg E).
:param projection_object: Instance of `pyproj.Proj`, defining an equidistant
projection.
:return: event_table: Same as input, but with additional columns listed
below.
event_table.x_coord_metres: x-coordinate of event.
event_table.y_coord_metres: y-coordinate of event.
"""
x_coords_metres, y_coords_metres = projections.project_latlng_to_xy(
event_table[EVENT_LATITUDE_COLUMN].values,
event_table[EVENT_LONGITUDE_COLUMN].values,
projection_object=projection_object)
return event_table.assign(**{
EVENT_X_COLUMN: x_coords_metres,
EVENT_Y_COLUMN: y_coords_metres
})
def _filter_events_by_bounding_box(
event_table, x_limits_metres, y_limits_metres):
"""Removes events outside of bounding box.
:param event_table: pandas DataFrame with at least the following columns.
event_table.x_coord_metres: x-coordinate of event.
event_table.y_coord_metres: y-coordinate of event.
    :param x_limits_metres: length-2 numpy array with [min, max] x-coordinates
        of bounding box.
    :param y_limits_metres: length-2 numpy array with [min, max] y-coordinates
        of bounding box.
:return: event_table: Same as input, but possibly with fewer rows.
"""
bad_x_flags = numpy.invert(numpy.logical_and(
event_table[EVENT_X_COLUMN].values >= x_limits_metres[0],
event_table[EVENT_X_COLUMN].values <= x_limits_metres[1]
))
bad_y_flags = numpy.invert(numpy.logical_and(
event_table[EVENT_Y_COLUMN].values >= y_limits_metres[0],
event_table[EVENT_Y_COLUMN].values <= y_limits_metres[1]
))
bad_row_indices = numpy.where(
numpy.logical_or(bad_x_flags, bad_y_flags)
)[0]
return event_table.drop(
event_table.index[bad_row_indices], axis=0, inplace=False
)
def _filter_storms_by_time(storm_object_table, max_start_time_unix_sec,
min_end_time_unix_sec):
"""Filters storm cells by time.
Any storm cell with start time > `max_start_time_unix_sec`, or end time <
`min_end_time_unix_sec`, will be removed.
:param storm_object_table: pandas DataFrame with at least the following
columns. Each row is one storm object.
storm_object_table.cell_start_time_unix_sec: First time in corresponding
storm cell.
storm_object_table.cell_end_time_unix_sec: Last time in corresponding storm
cell.
:param max_start_time_unix_sec: Latest allowed start time.
:param min_end_time_unix_sec: Earliest allowed end time.
:return: storm_object_table: Same as input, but possibly with fewer rows.
"""
bad_row_flags = numpy.invert(numpy.logical_and(
storm_object_table[tracking_utils.CELL_START_TIME_COLUMN].values <=
max_start_time_unix_sec,
storm_object_table[tracking_utils.CELL_END_TIME_COLUMN].values >=
min_end_time_unix_sec
))
bad_row_indices = numpy.where(bad_row_flags)[0]
return storm_object_table.drop(
storm_object_table.index[bad_row_indices], axis=0, inplace=False
)
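# Example of the filter above (hypothetical times): with
# max_start_time_unix_sec = 1000 and min_end_time_unix_sec = 500, a cell
# spanning 200-800 s is kept, while a cell spanning 1200-1500 s (starts too
# late) or 100-400 s (ends too early) is dropped.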
def _interp_one_storm_in_time(storm_object_table_1cell, secondary_id_string,
target_time_unix_sec):
"""Interpolates one storm cell in time.
The storm object nearest to the target time is advected (i.e., moved as a
solid body, so its shape does not change) to the target time. Radar data
usually have a time interval of <= 5 minutes, so interpolation is usually
over <= 2.5 minutes, and we assume that changes in shape over this time are
negligible.
V = number of vertices in a given storm outline
:param storm_object_table_1cell: pandas DataFrame with at least the
following columns. Each row is one storm object, and this table should
contain data for only one storm cell. In other words, the ID for each
storm object should be the same.
storm_object_table_1cell.unix_time_sec: Valid time.
storm_object_table_1cell.centroid_x_metres: x-coordinate of centroid.
storm_object_table_1cell.centroid_y_metres: y-coordinate of centroid.
storm_object_table_1cell.vertices_x_metres: length-V numpy array with x-
coordinates of vertices.
storm_object_table_1cell.vertices_y_metres: length-V numpy array with y-
coordinates of vertices.
:param secondary_id_string: Secondary storm ID.
:param target_time_unix_sec: Target time. Storm cell will be interpolated
to this time.
:return: interp_vertex_table_1object: pandas DataFrame with the following
columns. Each row is one vertex of the interpolated storm outline.
interp_vertex_table_1object.secondary_id_string: Secondary storm ID (same as
input).
interp_vertex_table_1object.vertex_x_metres: x-coordinate of vertex.
interp_vertex_table_1object.vertex_y_metres: y-coordinate of vertex.
"""
valid_times_unix_sec, orig_to_unique_indices = numpy.unique(
storm_object_table_1cell[tracking_utils.VALID_TIME_COLUMN].values,
return_inverse=True)
num_times = len(valid_times_unix_sec)
x_centroids_metres = numpy.full(num_times, numpy.nan)
y_centroids_metres = numpy.full(num_times, numpy.nan)
x_vertices_by_time_metres = [numpy.array([], dtype=float)] * num_times
y_vertices_by_time_metres = [numpy.array([], dtype=float)] * num_times
for i in range(num_times):
these_orig_indices = numpy.where(orig_to_unique_indices == i)[0]
x_centroids_metres[i] = numpy.mean(
storm_object_table_1cell[STORM_CENTROID_X_COLUMN].values[
these_orig_indices]
)
y_centroids_metres[i] = numpy.mean(
storm_object_table_1cell[STORM_CENTROID_Y_COLUMN].values[
these_orig_indices]
)
for j in these_orig_indices:
this_x_offset_metres = (
x_centroids_metres[i] -
storm_object_table_1cell[STORM_CENTROID_X_COLUMN].values[j]
)
this_y_offset_metres = (
y_centroids_metres[i] -
storm_object_table_1cell[STORM_CENTROID_Y_COLUMN].values[j]
)
these_x_vertices_metres = (
this_x_offset_metres +
storm_object_table_1cell[STORM_VERTICES_X_COLUMN].values[j]
)
x_vertices_by_time_metres[i] = numpy.concatenate((
x_vertices_by_time_metres[i], these_x_vertices_metres
))
these_y_vertices_metres = (
this_y_offset_metres +
storm_object_table_1cell[STORM_VERTICES_Y_COLUMN].values[j]
)
y_vertices_by_time_metres[i] = numpy.concatenate((
y_vertices_by_time_metres[i], these_y_vertices_metres
))
centroid_matrix = numpy.vstack((x_centroids_metres, y_centroids_metres))
interp_centroid_vector = interp.interp_in_time(
input_matrix=centroid_matrix,
sorted_input_times_unix_sec=valid_times_unix_sec,
query_times_unix_sec=numpy.array([target_time_unix_sec]),
method_string=interp.LINEAR_METHOD_STRING, extrapolate=True)
absolute_time_diffs_sec = numpy.absolute(
valid_times_unix_sec - target_time_unix_sec
)
nearest_time_index = numpy.argmin(absolute_time_diffs_sec)
new_x_vertices_metres = (
interp_centroid_vector[0] - x_centroids_metres[nearest_time_index] +
x_vertices_by_time_metres[nearest_time_index]
)
new_y_vertices_metres = (
interp_centroid_vector[1] - y_centroids_metres[nearest_time_index] +
y_vertices_by_time_metres[nearest_time_index]
)
num_vertices = len(new_x_vertices_metres)
return pandas.DataFrame.from_dict({
tracking_utils.SECONDARY_ID_COLUMN:
[secondary_id_string] * num_vertices,
STORM_VERTEX_X_COLUMN: new_x_vertices_metres,
STORM_VERTEX_Y_COLUMN: new_y_vertices_metres
})
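# Worked example of the advection above (hypothetical numbers): with storm
# centroids at x = 0 m (t = 0 s) and x = 3000 m (t = 300 s), a target time of
# 100 s gives an interpolated centroid at x = 1000 m; the outline from the
# nearest time (t = 0 s) is then shifted rigidly by +1000 m in x, so its shape
# is preserved.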
def _find_secondary_start_end_times(storm_object_table):
"""Finds start/end times for each secondary storm ID.
:param storm_object_table: pandas DataFrame with at least the following
columns.
storm_object_table['valid_time_unix_sec']: Valid time.
storm_object_table['secondary_id_string']: Secondary ID.
:return: storm_object_table: Same as input but with the following new
columns.
storm_object_table['secondary_cell_start_time_unix_sec']: Start time for
secondary ID.
storm_object_table['secondary_cell_end_time_unix_sec']: End time for
secondary ID.
"""
# TODO(thunderhoser): This could use a unit test.
if SECONDARY_START_TIME_COLUMN in list(storm_object_table):
return storm_object_table
unique_secondary_id_strings, orig_to_unique_indices = numpy.unique(
storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values,
return_inverse=True
)
num_objects = len(storm_object_table.index)
storm_object_table = storm_object_table.assign(**{
SECONDARY_START_TIME_COLUMN: numpy.full(num_objects, -1, dtype=int),
SECONDARY_END_TIME_COLUMN: numpy.full(num_objects, -1, dtype=int)
})
num_secondary_cells = len(unique_secondary_id_strings)
for j in range(num_secondary_cells):
these_object_indices = numpy.where(orig_to_unique_indices == j)[0]
this_start_time_unix_sec = numpy.min(
storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[
these_object_indices]
)
this_end_time_unix_sec = numpy.max(
storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[
these_object_indices]
)
storm_object_table[SECONDARY_START_TIME_COLUMN].values[
these_object_indices
] = this_start_time_unix_sec
storm_object_table[SECONDARY_END_TIME_COLUMN].values[
these_object_indices
] = this_end_time_unix_sec
return storm_object_table
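# A minimal test sketch for the TODO above (hypothetical data): with valid
# times [0, 300, 300, 600] s and secondary IDs ['a', 'a', 'b', 'b'], the new
# columns should hold start times [0, 0, 300, 300] and end times
# [300, 300, 600, 600].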
def _interp_storms_in_time(storm_object_table, target_time_unix_sec,
max_time_before_start_sec, max_time_after_end_sec):
"""Interpolates each storm cell in time.
:param storm_object_table: pandas DataFrame created by
`_project_storms_latlng_to_xy`.
:param target_time_unix_sec: Target time. Storm cells will be interpolated
to this time.
:param max_time_before_start_sec: Max extrapolation time before beginning of
storm cell.
:param max_time_after_end_sec: Max extrapolation time after end of storm
cell.
:return: interp_vertex_table: pandas DataFrame with the following columns.
Each row is one vertex of one interpolated storm object.
interp_vertex_table.secondary_id_string: Secondary ID for storm cell.
interp_vertex_table.vertex_x_metres: x-coordinate of vertex.
interp_vertex_table.vertex_y_metres: y-coordinate of vertex.
"""
storm_object_table = _find_secondary_start_end_times(storm_object_table)
max_start_time_unix_sec = target_time_unix_sec + max_time_before_start_sec
min_end_time_unix_sec = target_time_unix_sec - max_time_after_end_sec
sorted_storm_object_table = storm_object_table.loc[
(storm_object_table[SECONDARY_START_TIME_COLUMN]
<= max_start_time_unix_sec + 1800) &
(storm_object_table[SECONDARY_END_TIME_COLUMN] >=
min_end_time_unix_sec - 1800)
]
sorted_storm_object_table = sorted_storm_object_table.sort_values(
tracking_utils.VALID_TIME_COLUMN, axis=0, ascending=True,
inplace=False
)
unique_secondary_id_strings = numpy.unique(numpy.array(
sorted_storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values
))
list_of_vertex_tables = []
num_storm_cells = len(unique_secondary_id_strings)
for j in range(num_storm_cells):
these_main_rows = numpy.where(
sorted_storm_object_table[tracking_utils.SECONDARY_ID_COLUMN] ==
unique_secondary_id_strings[j]
)[0]
these_predecessor_rows = temporal_tracking.find_immediate_predecessors(
storm_object_table=sorted_storm_object_table,
target_row=these_main_rows[0]
)
these_successor_rows = temporal_tracking.find_immediate_successors(
storm_object_table=sorted_storm_object_table,
target_row=these_main_rows[-1]
)
these_rows = numpy.concatenate((
these_predecessor_rows, these_main_rows, these_successor_rows
))
if len(these_rows) == 1:
continue
this_start_time_unix_sec = sorted_storm_object_table[
tracking_utils.VALID_TIME_COLUMN
].values[these_main_rows[0]]
if this_start_time_unix_sec > max_start_time_unix_sec:
continue
this_end_time_unix_sec = sorted_storm_object_table[
tracking_utils.VALID_TIME_COLUMN
].values[these_main_rows[-1]]
if this_end_time_unix_sec < min_end_time_unix_sec:
continue
list_of_vertex_tables.append(
_interp_one_storm_in_time(
storm_object_table_1cell=sorted_storm_object_table.iloc[
these_rows],
secondary_id_string=unique_secondary_id_strings[j],
target_time_unix_sec=target_time_unix_sec)
)
if len(list_of_vertex_tables) == 1:
continue
list_of_vertex_tables[-1] = list_of_vertex_tables[-1].align(
list_of_vertex_tables[0], axis=1
)[0]
if len(list_of_vertex_tables) == 0:
return pandas.DataFrame(
columns=[tracking_utils.SECONDARY_ID_COLUMN, STORM_VERTEX_X_COLUMN,
STORM_VERTEX_Y_COLUMN]
)
return pandas.concat(list_of_vertex_tables, axis=0, ignore_index=True)
def _find_nearest_storms_one_time(
interp_vertex_table, event_x_coords_metres, event_y_coords_metres,
max_link_distance_metres, max_polygon_attempt_distance_metres=30000.):
"""Finds nearest storm to each event.
In this case all events are at the same time.
N = number of events
:param interp_vertex_table: pandas DataFrame created by
`_interp_storms_in_time`.
:param event_x_coords_metres: length-N numpy array with x-coordinates of
events.
:param event_y_coords_metres: length-N numpy array with y-coordinates of
events.
:param max_link_distance_metres: Max linkage distance. If the nearest storm
edge to event E is > `max_link_distance_metres` away, event E will not
be linked to any storm.
:param max_polygon_attempt_distance_metres: Max distance for attempting to
place event inside storm.
:return: nearest_secondary_id_strings: length-N list, where
nearest_secondary_id_strings[i] = secondary ID of nearest storm to [i]th
event. If nearest_secondary_id_strings[i] = None, no storm was linked
to [i]th event.
:return: linkage_distances_metres: length-N numpy array of linkage
distances. If linkage_distances_metres[i] = NaN, [i]th event was not
linked to any storm.
"""
max_polygon_attempt_distance_metres = max([
max_polygon_attempt_distance_metres, 2 * max_link_distance_metres
])
unique_secondary_id_strings, orig_to_unique_indices = numpy.unique(
interp_vertex_table[tracking_utils.SECONDARY_ID_COLUMN].values,
return_inverse=True)
unique_secondary_id_strings = unique_secondary_id_strings.tolist()
num_events = len(event_x_coords_metres)
nearest_secondary_id_strings = [None] * num_events
linkage_distances_metres = numpy.full(num_events, numpy.nan)
for k in range(num_events):
these_x_diffs_metres = numpy.absolute(
event_x_coords_metres[k] -
interp_vertex_table[STORM_VERTEX_X_COLUMN].values
)
these_y_diffs_metres = numpy.absolute(
event_y_coords_metres[k] -
interp_vertex_table[STORM_VERTEX_Y_COLUMN].values
)
these_vertex_indices = numpy.where(numpy.logical_and(
these_x_diffs_metres <= max_polygon_attempt_distance_metres,
these_y_diffs_metres <= max_polygon_attempt_distance_metres
))[0]
if len(these_vertex_indices) == 0:
continue
# Try placing event inside storm.
these_secondary_id_strings = numpy.unique(
interp_vertex_table[tracking_utils.SECONDARY_ID_COLUMN].values[
these_vertex_indices]
).tolist()
for this_secondary_id_string in these_secondary_id_strings:
this_storm_indices = numpy.where(
orig_to_unique_indices ==
unique_secondary_id_strings.index(this_secondary_id_string)
)[0]
this_polygon_object = polygons.vertex_arrays_to_polygon_object(
exterior_x_coords=interp_vertex_table[
STORM_VERTEX_X_COLUMN].values[this_storm_indices],
exterior_y_coords=interp_vertex_table[
STORM_VERTEX_Y_COLUMN].values[this_storm_indices]
)
this_event_in_polygon = polygons.point_in_or_on_polygon(
polygon_object=this_polygon_object,
query_x_coordinate=event_x_coords_metres[k],
query_y_coordinate=event_y_coords_metres[k]
)
if not this_event_in_polygon:
continue
nearest_secondary_id_strings[k] = this_secondary_id_string
linkage_distances_metres[k] = 0.
break
if nearest_secondary_id_strings[k] is not None:
continue
# Try placing event near storm.
these_vertex_indices = numpy.where(numpy.logical_and(
these_x_diffs_metres <= max_link_distance_metres,
these_y_diffs_metres <= max_link_distance_metres
))[0]
if len(these_vertex_indices) == 0:
continue
these_distances_metres = numpy.sqrt(
these_x_diffs_metres[these_vertex_indices] ** 2 +
these_y_diffs_metres[these_vertex_indices] ** 2
)
if not numpy.any(these_distances_metres <= max_link_distance_metres):
continue
this_min_index = these_vertex_indices[
numpy.argmin(these_distances_metres)
]
nearest_secondary_id_strings[k] = interp_vertex_table[
tracking_utils.SECONDARY_ID_COLUMN
].values[this_min_index]
this_storm_indices = numpy.where(
orig_to_unique_indices ==
unique_secondary_id_strings.index(nearest_secondary_id_strings[k])
)[0]
this_polygon_object = polygons.vertex_arrays_to_polygon_object(
exterior_x_coords=interp_vertex_table[STORM_VERTEX_X_COLUMN].values[
this_storm_indices],
exterior_y_coords=interp_vertex_table[STORM_VERTEX_Y_COLUMN].values[
this_storm_indices]
)
this_event_in_polygon = polygons.point_in_or_on_polygon(
polygon_object=this_polygon_object,
query_x_coordinate=event_x_coords_metres[k],
query_y_coordinate=event_y_coords_metres[k])
if this_event_in_polygon:
linkage_distances_metres[k] = 0.
else:
linkage_distances_metres[k] = numpy.min(these_distances_metres)
return nearest_secondary_id_strings, linkage_distances_metres
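# In brief (a summary of the two-stage logic above, with hypothetical
# distances): an event inside or on a storm polygon is linked with distance 0;
# otherwise the nearest storm vertex within max_link_distance_metres is used,
# so with the default 30-km limit an event 12 km from the closest vertex is
# linked at ~12000 m, while an event 40 km away stays unlinked (NaN distance,
# None ID).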
def _finish_tornado_linkage(
storm_object_table, tornado_to_storm_table, tornado_row,
nearest_secondary_id_string, nearest_storm_time_unix_sec,
nearest_distance_metres):
"""Finishes linking occurrence (not genesis) for one tornado to storm.
:param storm_object_table: pandas DataFrame created by
`_project_storms_latlng_to_xy`.
:param tornado_to_storm_table: See output doc for `_find_nearest_storms`.
:param tornado_row: Row index into `tornado_to_storm_table`. Will link
only this event (this tornado at this time).
:param nearest_secondary_id_string: Secondary ID of nearest storm object.
:param nearest_storm_time_unix_sec: Time of nearest storm object.
:param nearest_distance_metres: Distance to nearest storm object.
    :return: tornado_to_storm_table: Same as input but with different linkage
        values.
"""
# Housekeeping.
event_time_unix_sec = tornado_to_storm_table[EVENT_TIME_COLUMN].values[
tornado_row]
tornado_id_string = tornado_to_storm_table[
tornado_io.TORNADO_ID_COLUMN].values[tornado_row]
# Find track segments of this tornado that have not yet been linked to a
# storm.
relevant_tornado_rows = numpy.where(numpy.logical_and(
tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values ==
tornado_id_string,
numpy.isnan(tornado_to_storm_table[LINKAGE_DISTANCE_COLUMN].values)
))[0]
sort_indices = numpy.argsort(
tornado_to_storm_table[EVENT_TIME_COLUMN].values[relevant_tornado_rows]
)
relevant_tornado_rows = relevant_tornado_rows[sort_indices]
relevant_tornado_times_unix_sec = tornado_to_storm_table[
EVENT_TIME_COLUMN].values[relevant_tornado_rows]
# Find main storm object (to which one track segment was just linked).
storm_cell_rows = numpy.where(
storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values ==
nearest_secondary_id_string
)[0]
storm_cell_times_unix_sec = storm_object_table[
tracking_utils.VALID_TIME_COLUMN].values[storm_cell_rows]
this_subrow = numpy.argmin(numpy.absolute(
storm_cell_times_unix_sec - nearest_storm_time_unix_sec
))
main_storm_object_row = storm_cell_rows[this_subrow]
# Find "relevant" storm objects (all non-splitting successors of main storm
# object).
these_rows = temporal_tracking.find_successors(
storm_object_table=storm_object_table, target_row=main_storm_object_row,
num_seconds_forward=LARGE_INTEGER, max_num_sec_id_changes=0,
change_type_string=temporal_tracking.SPLIT_STRING,
return_all_on_path=True)
relevant_storm_object_table = storm_object_table.iloc[these_rows]
these_times_unix_sec = relevant_storm_object_table[
tracking_utils.VALID_TIME_COLUMN].values
first_good_time_unix_sec = min([
numpy.min(these_times_unix_sec), event_time_unix_sec
])
last_good_time_unix_sec = max([
numpy.max(these_times_unix_sec), event_time_unix_sec
])
# Find first track segment that occurs <= first relevant storm time.
early_subrows = numpy.where(
relevant_tornado_times_unix_sec <= first_good_time_unix_sec
)[0]
if len(early_subrows) == 0:
first_subrow = 0
else:
first_subrow = early_subrows[-1]
# Find last track segment that occurs >= last relevant storm time.
late_subrows = numpy.where(
relevant_tornado_times_unix_sec >= last_good_time_unix_sec
)[0]
if len(late_subrows) == 0:
last_subrow = len(relevant_tornado_rows) - 1
else:
last_subrow = late_subrows[0]
# Link track segments to storm objects.
relevant_tornado_rows = relevant_tornado_rows[
first_subrow:(last_subrow + 1)
]
tornado_to_storm_table[LINKAGE_DISTANCE_COLUMN].values[
relevant_tornado_rows
] = nearest_distance_metres
for k in relevant_tornado_rows:
this_event_time_unix_sec = tornado_to_storm_table[
EVENT_TIME_COLUMN].values[k]
tornado_to_storm_table[NEAREST_TIME_COLUMN].values[k] = (
this_event_time_unix_sec
)
this_storm_object_row = numpy.argmin(numpy.absolute(
relevant_storm_object_table[tracking_utils.VALID_TIME_COLUMN].values
- this_event_time_unix_sec
))
# tornado_to_storm_table[NEAREST_TIME_COLUMN].values[k] = (
# nearest_storm_time_unix_sec
# )
#
# this_storm_object_row = numpy.argmin(numpy.absolute(
# relevant_storm_object_table[tracking_utils.VALID_TIME_COLUMN].values
# - tornado_to_storm_table[EVENT_TIME_COLUMN].values[k]
# ))
tornado_to_storm_table[NEAREST_SECONDARY_ID_COLUMN].values[k] = (
relevant_storm_object_table[
tracking_utils.SECONDARY_ID_COLUMN
].values[this_storm_object_row]
)
these_rows = numpy.where(
tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values ==
tornado_id_string
)[0]
tornado_to_storm_table[TORNADO_ASSIGNED_COLUMN].values[these_rows] = True
return tornado_to_storm_table
def _link_tornado_to_new_storm(
storm_object_table, tornado_to_storm_table, tornado_row,
max_time_before_storm_start_sec, max_time_after_storm_end_sec):
"""Links one tornado to new storm.
In this case, previous time steps of the tornado have already been linked.
:param storm_object_table: pandas DataFrame created by
`_project_storms_latlng_to_xy`.
:param tornado_to_storm_table: See output doc for `_find_nearest_storms`.
:param tornado_row: Row index into `tornado_to_storm_table`. Will link
only this event (this tornado at this time).
:param max_time_before_storm_start_sec: See doc for `_find_nearest_storms`.
:param max_time_after_storm_end_sec: Same.
:return: tornado_to_storm_table: Same as input but maybe with different
linkage values.
"""
# Housekeeping.
event_time_unix_sec = tornado_to_storm_table[
EVENT_TIME_COLUMN].values[tornado_row]
event_time_string = time_conversion.unix_sec_to_string(
event_time_unix_sec, TIME_FORMAT)
print('Trying to link tornado at {0:s} to NEW storm...'.format(
event_time_string))
# Find track segments of this tornado that have already been linked.
event_linked_flags = numpy.array([
s is not None
for s in tornado_to_storm_table[NEAREST_SECONDARY_ID_COLUMN].values
], dtype=bool)
tornado_rows = numpy.where(numpy.logical_and(
tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values ==
tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values[
tornado_row],
event_linked_flags
))[0]
# Find *latest* segment of this tornado that has already been linked.
this_subrow = numpy.argmax(
tornado_to_storm_table[EVENT_TIME_COLUMN].values[tornado_rows]
)
last_assigned_event_row = tornado_rows[this_subrow]
# Find storm object (s*) to which this segment was linked.
last_assigned_sec_id_string = tornado_to_storm_table[
NEAREST_SECONDARY_ID_COLUMN].values[last_assigned_event_row]
last_assigned_cell_rows = numpy.where(
storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values ==
last_assigned_sec_id_string
)[0]
this_subrow = numpy.argmax(
storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[
last_assigned_cell_rows]
)
last_assigned_object_row = last_assigned_cell_rows[this_subrow]
# Find successors of storm object s*. Try simple successors first (useful
# when sharing linkages between two periods).
relevant_object_rows = temporal_tracking.find_successors(
storm_object_table=storm_object_table,
target_row=last_assigned_object_row,
num_seconds_forward=LARGE_INTEGER, return_all_on_path=True)
relevant_sec_id_strings = numpy.unique(
storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values[
relevant_object_rows]
)
# Try linking other segments of the tornado track only to successors of s*.
interp_vertex_table = _interp_storms_in_time(
storm_object_table=storm_object_table,
target_time_unix_sec=event_time_unix_sec,
max_time_before_start_sec=max_time_before_storm_start_sec,
max_time_after_end_sec=max_time_after_storm_end_sec)
interp_vertex_table = interp_vertex_table.loc[
interp_vertex_table[tracking_utils.SECONDARY_ID_COLUMN].isin(
relevant_sec_id_strings)
]
nearest_secondary_id_strings, nearest_distances_metres = (
_find_nearest_storms_one_time(
interp_vertex_table=interp_vertex_table,
event_x_coords_metres=
tornado_to_storm_table[EVENT_X_COLUMN].values[[tornado_row]],
event_y_coords_metres=
tornado_to_storm_table[EVENT_Y_COLUMN].values[[tornado_row]],
max_link_distance_metres=LARGE_DISTANCE_METRES)
)
if nearest_secondary_id_strings[0] is None:
return tornado_to_storm_table
return _finish_tornado_linkage(
storm_object_table=storm_object_table,
tornado_to_storm_table=tornado_to_storm_table, tornado_row=tornado_row,
nearest_secondary_id_string=nearest_secondary_id_strings[0],
nearest_storm_time_unix_sec=event_time_unix_sec,
nearest_distance_metres=nearest_distances_metres[0]
)
def _find_nearest_storms(
storm_object_table, event_table, max_time_before_storm_start_sec,
max_time_after_storm_end_sec, interp_time_interval_sec,
max_link_distance_metres, event_type_string):
"""Finds nearest storm to each event.
In this case the events may be at different times.
:param storm_object_table: pandas DataFrame created by
`_project_storms_latlng_to_xy`.
:param event_table: pandas DataFrame created by
`_filter_events_by_bounding_box`.
:param max_time_before_storm_start_sec: See doc for `_check_input_args`.
:param max_time_after_storm_end_sec: Same.
:param interp_time_interval_sec: Same.
:param max_link_distance_metres: Same.
:param event_type_string: Event type (must be accepted by
`check_event_type`).
:return: event_to_storm_table: Same as input argument `event_table`, but
with the following additional columns.
event_to_storm_table.nearest_secondary_id_string: Secondary ID of nearest
storm object. If event was not linked to a storm, this is None.
event_to_storm_table.nearest_storm_time_unix_sec: Valid time of nearest
storm object. If event was not linked to a storm, this is -1.
event_to_storm_table.linkage_distance_metres: Distance between event and
edge of nearest storm object. If event was not linked to a storm, this
is NaN.
"""
num_events = len(event_table.index)
event_table = event_table.assign(**{
NEAREST_SECONDARY_ID_COLUMN: [None] * num_events,
NEAREST_TIME_COLUMN: numpy.full(num_events, -1, dtype=int),
LINKAGE_DISTANCE_COLUMN: numpy.full(num_events, numpy.nan)
})
if event_type_string == TORNADO_EVENT_STRING:
event_table = event_table.assign(**{
TORNADO_ASSIGNED_COLUMN: numpy.full(num_events, False, dtype=bool)
})
interp_times_unix_sec = number_rounding.round_to_nearest(
event_table[EVENT_TIME_COLUMN].values, interp_time_interval_sec
)
interp_times_unix_sec = numpy.round(interp_times_unix_sec).astype(int)
unique_interp_times_unix_sec, orig_to_unique_indices = numpy.unique(
interp_times_unix_sec, return_inverse=True
)
unique_interp_time_strings = [
time_conversion.unix_sec_to_string(t, TIME_FORMAT)
for t in unique_interp_times_unix_sec
]
num_unique_interp_times = len(unique_interp_time_strings)
for i in range(num_unique_interp_times):
if event_type_string == TORNADO_EVENT_STRING:
event_unassigned_flags = numpy.array([
s is None
for s in event_table[NEAREST_SECONDARY_ID_COLUMN].values
], dtype=bool)
these_flags = numpy.logical_and(
event_unassigned_flags,
numpy.invert(event_table[TORNADO_ASSIGNED_COLUMN].values)
)
these_event_rows = numpy.where(numpy.logical_and(
orig_to_unique_indices == i, these_flags
))[0]
else:
these_event_rows = numpy.where(orig_to_unique_indices == i)[0]
if len(these_event_rows) == 0:
continue
print('Linking events at ~{0:s} to storms...'.format(
unique_interp_time_strings[i]
))
this_interp_vertex_table = _interp_storms_in_time(
storm_object_table=storm_object_table,
target_time_unix_sec=unique_interp_times_unix_sec[i],
max_time_before_start_sec=max_time_before_storm_start_sec,
max_time_after_end_sec=max_time_after_storm_end_sec)
these_nearest_id_strings, these_link_distances_metres = (
_find_nearest_storms_one_time(
interp_vertex_table=this_interp_vertex_table,
event_x_coords_metres=event_table[EVENT_X_COLUMN].values[
these_event_rows],
event_y_coords_metres=event_table[EVENT_Y_COLUMN].values[
these_event_rows],
max_link_distance_metres=max_link_distance_metres)
)
if event_type_string != TORNADO_EVENT_STRING:
event_table[LINKAGE_DISTANCE_COLUMN].values[
these_event_rows] = these_link_distances_metres
for j in range(len(these_event_rows)):
if these_nearest_id_strings[j] is None:
continue
k = these_event_rows[j]
event_table[NEAREST_SECONDARY_ID_COLUMN].values[k] = (
these_nearest_id_strings[j]
)
event_table[NEAREST_TIME_COLUMN].values[k] = (
unique_interp_times_unix_sec[i]
)
continue
for j in range(len(these_event_rows)):
if these_nearest_id_strings[j] is None:
continue
event_table = _finish_tornado_linkage(
storm_object_table=storm_object_table,
tornado_to_storm_table=event_table,
tornado_row=these_event_rows[j],
nearest_secondary_id_string=these_nearest_id_strings[j],
nearest_storm_time_unix_sec=unique_interp_times_unix_sec[i],
nearest_distance_metres=these_link_distances_metres[j]
)
if event_type_string == TORNADO_EVENT_STRING:
for i in range(num_unique_interp_times):
event_unassigned_flags = numpy.array([
s is None
for s in event_table[NEAREST_SECONDARY_ID_COLUMN].values
], dtype=bool)
these_flags = numpy.logical_and(
event_unassigned_flags,
event_table[TORNADO_ASSIGNED_COLUMN].values
)
these_event_rows = numpy.where(numpy.logical_and(
orig_to_unique_indices == i, these_flags
))[0]
for j in these_event_rows:
event_table = _link_tornado_to_new_storm(
storm_object_table=storm_object_table,
tornado_to_storm_table=event_table, tornado_row=j,
max_time_before_storm_start_sec=
max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec)
unlinked_indices = numpy.where(
numpy.isnan(event_table[LINKAGE_DISTANCE_COLUMN].values)
)[0]
min_storm_latitude_deg = numpy.min(
storm_object_table[tracking_utils.CENTROID_LATITUDE_COLUMN].values
)
max_storm_latitude_deg = numpy.max(
storm_object_table[tracking_utils.CENTROID_LATITUDE_COLUMN].values
)
min_storm_longitude_deg = numpy.min(
storm_object_table[tracking_utils.CENTROID_LONGITUDE_COLUMN].values
)
max_storm_longitude_deg = numpy.max(
storm_object_table[tracking_utils.CENTROID_LONGITUDE_COLUMN].values
)
for this_index in unlinked_indices:
warning_string = (
'Event at ({0:.2f} deg N, {1:.2f} deg E) COULD NOT BE LINKED to any'
' storm in box ({2:.2f}...{3:.2f} deg N, {4:.2f}...{5:.2f} deg E).'
).format(
event_table[EVENT_LATITUDE_COLUMN].values[this_index],
event_table[EVENT_LONGITUDE_COLUMN].values[this_index],
min_storm_latitude_deg, max_storm_latitude_deg,
min_storm_longitude_deg, max_storm_longitude_deg
)
warnings.warn(warning_string)
latitude_in_bbox_flags = numpy.logical_and(
event_table[EVENT_LATITUDE_COLUMN].values[unlinked_indices] >=
min_storm_latitude_deg,
event_table[EVENT_LATITUDE_COLUMN].values[unlinked_indices] <=
max_storm_latitude_deg
)
longitude_in_bbox_flags = numpy.logical_and(
event_table[EVENT_LONGITUDE_COLUMN].values[unlinked_indices] >=
min_storm_longitude_deg,
event_table[EVENT_LONGITUDE_COLUMN].values[unlinked_indices] <=
max_storm_longitude_deg
)
in_bbox_flags = numpy.logical_and(
latitude_in_bbox_flags, longitude_in_bbox_flags)
num_unlinked_events = len(unlinked_indices)
num_unlinked_events_in_bbox = numpy.sum(in_bbox_flags)
log_string = (
'Num events = {0:d} ... storm objects = {1:d} ... unlinked events '
'(in storm bounding box) = {2:d} ({3:d})'
).format(
len(event_table.index), len(storm_object_table.index),
num_unlinked_events, num_unlinked_events_in_bbox
)
if tornado_io.TORNADO_ID_COLUMN in event_table:
unlinked_id_strings = numpy.unique(
event_table[tornado_io.TORNADO_ID_COLUMN].values[unlinked_indices]
)
unlinked_indices_in_bbox = unlinked_indices[in_bbox_flags]
unlinked_id_strings_in_bbox = numpy.unique(
event_table[tornado_io.TORNADO_ID_COLUMN].values[
unlinked_indices_in_bbox]
)
log_string += (
' ... unlinked tornadoes (in storm bounding box) = {0:d} ({1:d})'
).format(
len(unlinked_id_strings), len(unlinked_id_strings_in_bbox)
)
print(log_string)
return event_table
def _find_predecessors(storm_to_events_table, target_row):
"""Finds simple and merging predecessors of a storm object.
A "simple predecessor" of storm object S is connected to S by no more than
one split and zero mergers.
A "merging predecessor" of storm object S is connected to S by exactly one
merger and zero splits.
:param storm_to_events_table: pandas DataFrame created by
`_reverse_wind_linkages` or `_reverse_tornado_linkages`.
    :param target_row: Row index of target storm object in
        `storm_to_events_table`.
:return: simple_predecessor_rows: 1-D numpy array with row indices of simple
predecessors.
:return: merging_predecessor_rows: 1-D numpy array with row indices of
merging predecessors.
"""
predecessor_rows_one_change = temporal_tracking.find_predecessors(
storm_object_table=storm_to_events_table, target_row=target_row,
num_seconds_back=LARGE_INTEGER, max_num_sec_id_changes=1,
change_type_string=temporal_tracking.ANY_CHANGE_STRING,
return_all_on_path=True)
predecessor_rows_zero_mergers = temporal_tracking.find_predecessors(
storm_object_table=storm_to_events_table, target_row=target_row,
num_seconds_back=LARGE_INTEGER, max_num_sec_id_changes=0,
change_type_string=temporal_tracking.MERGER_STRING,
return_all_on_path=True)
predecessor_rows_one_change = predecessor_rows_one_change.tolist()
predecessor_rows_zero_mergers = predecessor_rows_zero_mergers.tolist()
simple_predecessor_rows = (
set(predecessor_rows_one_change) & set(predecessor_rows_zero_mergers)
)
predecessor_rows_one_merger = temporal_tracking.find_predecessors(
storm_object_table=storm_to_events_table, target_row=target_row,
num_seconds_back=LARGE_INTEGER, max_num_sec_id_changes=1,
change_type_string=temporal_tracking.MERGER_STRING,
return_all_on_path=True)
predecessor_rows_one_merger = predecessor_rows_one_merger.tolist()
merging_predecessor_rows = (
set(predecessor_rows_one_merger) & set(predecessor_rows_one_change)
) - simple_predecessor_rows
simple_predecessor_rows = numpy.array(
list(simple_predecessor_rows), dtype=int
)
merging_predecessor_rows = numpy.array(
list(merging_predecessor_rows), dtype=int
)
return simple_predecessor_rows, merging_predecessor_rows
def _reverse_wind_linkages(storm_object_table, wind_to_storm_table):
"""Reverses wind linkages.
The input `wind_to_storm_table` contains wind-to-storm linkages, where each
wind observation is linked to 0 or 1 storms. The output
`storm_to_winds_table` will contain storm-to-wind linkages, where each storm
is linked to 0 or more wind observations.
K = number of wind observations linked to a given storm cell
:param storm_object_table: pandas DataFrame created by
`_project_storms_latlng_to_xy`.
:param wind_to_storm_table: pandas DataFrame created by
`_find_nearest_storms`.
:return: storm_to_winds_table: Same as input `storm_object_table`, but with
additional columns listed below. Each row is one storm object.
storm_to_winds_table.wind_station_ids: length-K list of string IDs for
weather stations.
storm_to_winds_table.event_latitudes_deg: length-K numpy array of latitudes
(deg N).
storm_to_winds_table.event_longitudes_deg: length-K numpy array of
longitudes (deg E).
storm_to_winds_table.u_winds_m_s01: length-K numpy array of u-wind
components (metres per second).
storm_to_winds_table.v_winds_m_s01: length-K numpy array of v-wind
components (metres per second).
storm_to_winds_table.linkage_distance_metres: length-K numpy array of
linkage distances (from wind observations to nearest edge of storm
cell).
storm_to_winds_table.relative_event_times_unix_sec: length-K numpy array
with relative times of wind observations (wind-ob time minus
storm-object time).
storm_to_winds_table.main_object_flags: length-K numpy array of Boolean
flags. If main_object_flags[k] = True in the [i]th row, the [i]th storm
object is the main object to which the [k]th wind observation was
linked.
"""
nested_array = storm_object_table[[
tracking_utils.SECONDARY_ID_COLUMN, tracking_utils.SECONDARY_ID_COLUMN
]].values.tolist()
num_storm_objects = len(storm_object_table.index)
these_flags = numpy.full(num_storm_objects, False, dtype=bool)
storm_to_winds_table = copy.deepcopy(storm_object_table)
storm_to_winds_table = storm_to_winds_table.assign(**{
WIND_STATION_IDS_COLUMN: nested_array,
EVENT_LATITUDES_COLUMN: nested_array,
EVENT_LONGITUDES_COLUMN: nested_array,
U_WINDS_COLUMN: nested_array,
V_WINDS_COLUMN: nested_array,
LINKAGE_DISTANCES_COLUMN: nested_array,
RELATIVE_EVENT_TIMES_COLUMN: nested_array,
MAIN_OBJECT_FLAGS_COLUMN: nested_array,
MERGING_PRED_FLAG_COLUMN: these_flags
})
for i in range(num_storm_objects):
storm_to_winds_table[WIND_STATION_IDS_COLUMN].values[i] = []
storm_to_winds_table[EVENT_LATITUDES_COLUMN].values[i] = []
storm_to_winds_table[EVENT_LONGITUDES_COLUMN].values[i] = []
storm_to_winds_table[U_WINDS_COLUMN].values[i] = []
storm_to_winds_table[V_WINDS_COLUMN].values[i] = []
storm_to_winds_table[LINKAGE_DISTANCES_COLUMN].values[i] = []
storm_to_winds_table[RELATIVE_EVENT_TIMES_COLUMN].values[i] = []
storm_to_winds_table[MAIN_OBJECT_FLAGS_COLUMN].values[i] = []
num_wind_obs = len(wind_to_storm_table.index)
for k in range(num_wind_obs):
this_secondary_id_string = wind_to_storm_table[
NEAREST_SECONDARY_ID_COLUMN
].values[k]
if this_secondary_id_string is None:
continue
this_storm_cell_flags = numpy.array([
s == this_secondary_id_string for s in
storm_to_winds_table[tracking_utils.SECONDARY_ID_COLUMN].values
])
this_storm_cell_rows = numpy.where(this_storm_cell_flags)[0]
this_nearest_time_unix_sec = wind_to_storm_table[
NEAREST_TIME_COLUMN].values[k]
these_time_diffs_sec = (
this_nearest_time_unix_sec -
storm_to_winds_table[tracking_utils.VALID_TIME_COLUMN].values[
this_storm_cell_rows]
)
# these_time_diffs_sec[these_time_diffs_sec < 0] = LARGE_INTEGER
this_main_object_row = this_storm_cell_rows[
numpy.argmin(numpy.absolute(these_time_diffs_sec))
]
these_simple_pred_rows, these_merging_pred_rows = _find_predecessors(
storm_to_events_table=storm_to_winds_table,
target_row=this_main_object_row)
storm_to_winds_table[MERGING_PRED_FLAG_COLUMN].values[
these_merging_pred_rows
] = True
for j in these_simple_pred_rows:
this_flag = (
storm_to_winds_table[tracking_utils.VALID_TIME_COLUMN].values[j]
> wind_to_storm_table[EVENT_TIME_COLUMN].values[k]
)
if this_flag:
continue
storm_to_winds_table[WIND_STATION_IDS_COLUMN].values[j].append(
wind_to_storm_table[raw_wind_io.STATION_ID_COLUMN].values[k]
)
storm_to_winds_table[EVENT_LATITUDES_COLUMN].values[j].append(
wind_to_storm_table[EVENT_LATITUDE_COLUMN].values[k]
)
storm_to_winds_table[EVENT_LONGITUDES_COLUMN].values[j].append(
wind_to_storm_table[EVENT_LONGITUDE_COLUMN].values[k]
)
storm_to_winds_table[U_WINDS_COLUMN].values[j].append(
wind_to_storm_table[raw_wind_io.U_WIND_COLUMN].values[k]
)
storm_to_winds_table[V_WINDS_COLUMN].values[j].append(
wind_to_storm_table[raw_wind_io.V_WIND_COLUMN].values[k]
)
storm_to_winds_table[LINKAGE_DISTANCES_COLUMN].values[j].append(
wind_to_storm_table[LINKAGE_DISTANCE_COLUMN].values[k]
)
this_relative_time_sec = (
wind_to_storm_table[EVENT_TIME_COLUMN].values[k] -
storm_to_winds_table[tracking_utils.VALID_TIME_COLUMN].values[j]
)
storm_to_winds_table[RELATIVE_EVENT_TIMES_COLUMN].values[j].append(
this_relative_time_sec)
storm_to_winds_table[MAIN_OBJECT_FLAGS_COLUMN].values[j].append(
j == this_main_object_row
)
for i in range(num_storm_objects):
storm_to_winds_table[EVENT_LATITUDES_COLUMN].values[i] = numpy.array(
storm_to_winds_table[EVENT_LATITUDES_COLUMN].values[i]
)
storm_to_winds_table[EVENT_LONGITUDES_COLUMN].values[i] = numpy.array(
storm_to_winds_table[EVENT_LONGITUDES_COLUMN].values[i]
)
storm_to_winds_table[U_WINDS_COLUMN].values[i] = numpy.array(
storm_to_winds_table[U_WINDS_COLUMN].values[i]
)
storm_to_winds_table[V_WINDS_COLUMN].values[i] = numpy.array(
storm_to_winds_table[V_WINDS_COLUMN].values[i]
)
storm_to_winds_table[LINKAGE_DISTANCES_COLUMN].values[i] = numpy.array(
storm_to_winds_table[LINKAGE_DISTANCES_COLUMN].values[i]
)
storm_to_winds_table[RELATIVE_EVENT_TIMES_COLUMN].values[i] = (
numpy.array(
storm_to_winds_table[RELATIVE_EVENT_TIMES_COLUMN].values[i],
dtype=int)
)
storm_to_winds_table[MAIN_OBJECT_FLAGS_COLUMN].values[i] = (
numpy.array(
storm_to_winds_table[MAIN_OBJECT_FLAGS_COLUMN].values[i],
dtype=bool)
)
return storm_to_winds_table
def _reverse_tornado_linkages(storm_object_table, tornado_to_storm_table):
"""Reverses tornado linkages.
The input `tornado_to_storm_table` contains tornado-to-storm linkages, where
each tornado is linked to 0 or 1 storms. The output
`storm_to_tornadoes_table` will contain storm-to-tornado linkages, where
each storm is linked to 0 or more tornadoes.
K = number of tornadoes linked to a given storm cell
:param storm_object_table: pandas DataFrame created by
`_project_storms_latlng_to_xy`.
:param tornado_to_storm_table: pandas DataFrame created by
`_find_nearest_storms`.
:return: storm_to_tornadoes_table: Same as input `storm_object_table`, but
with additional columns listed below. Each row is one storm object.
storm_to_tornadoes_table.event_latitudes_deg: length-K numpy array of
latitudes (deg N).
storm_to_tornadoes_table.event_longitudes_deg: length-K numpy array of
longitudes (deg E).
storm_to_tornadoes_table.f_or_ef_scale_ratings: length-K list of F-scale or
EF-scale ratings (strings).
storm_to_tornadoes_table.linkage_distance_metres: length-K numpy array of
linkage distances (from tornadoes to nearest edge of storm cell).
storm_to_tornadoes_table.relative_event_times_unix_sec: length-K numpy array
with relative times of tornadoes (tornado time minus storm-object time).
storm_to_tornadoes_table.main_object_flags: length-K numpy array of Boolean
flags. If main_object_flags[k] = True in the [i]th row, the [i]th storm
object is the main object to which the [k]th tornado was linked.
"""
nested_array = storm_object_table[[
tracking_utils.SECONDARY_ID_COLUMN, tracking_utils.SECONDARY_ID_COLUMN
]].values.tolist()
num_storm_objects = len(storm_object_table.index)
these_flags = numpy.full(num_storm_objects, False, dtype=bool)
storm_to_tornadoes_table = copy.deepcopy(storm_object_table)
storm_to_tornadoes_table = storm_to_tornadoes_table.assign(**{
EVENT_LATITUDES_COLUMN: nested_array,
EVENT_LONGITUDES_COLUMN: nested_array,
FUJITA_RATINGS_COLUMN: nested_array,
TORNADO_IDS_COLUMN: nested_array,
LINKAGE_DISTANCES_COLUMN: nested_array,
RELATIVE_EVENT_TIMES_COLUMN: nested_array,
MAIN_OBJECT_FLAGS_COLUMN: nested_array,
MERGING_PRED_FLAG_COLUMN: these_flags
})
for i in range(num_storm_objects):
storm_to_tornadoes_table[EVENT_LATITUDES_COLUMN].values[i] = []
storm_to_tornadoes_table[EVENT_LONGITUDES_COLUMN].values[i] = []
storm_to_tornadoes_table[FUJITA_RATINGS_COLUMN].values[i] = []
storm_to_tornadoes_table[TORNADO_IDS_COLUMN].values[i] = []
storm_to_tornadoes_table[LINKAGE_DISTANCES_COLUMN].values[i] = []
storm_to_tornadoes_table[RELATIVE_EVENT_TIMES_COLUMN].values[i] = []
storm_to_tornadoes_table[MAIN_OBJECT_FLAGS_COLUMN].values[i] = []
num_tornadoes = len(tornado_to_storm_table.index)
for k in range(num_tornadoes):
this_secondary_id_string = tornado_to_storm_table[
NEAREST_SECONDARY_ID_COLUMN
].values[k]
if this_secondary_id_string is None:
continue
this_storm_cell_flags = numpy.array([
s == this_secondary_id_string for s in
storm_to_tornadoes_table[tracking_utils.SECONDARY_ID_COLUMN].values
])
this_storm_cell_rows = numpy.where(this_storm_cell_flags)[0]
this_nearest_time_unix_sec = tornado_to_storm_table[
NEAREST_TIME_COLUMN].values[k]
these_time_diffs_sec = (
this_nearest_time_unix_sec -
storm_to_tornadoes_table[tracking_utils.VALID_TIME_COLUMN].values[
this_storm_cell_rows]
)
# these_time_diffs_sec[these_time_diffs_sec < 0] = LARGE_INTEGER
this_main_object_row = this_storm_cell_rows[
numpy.argmin(numpy.absolute(these_time_diffs_sec))
]
these_simple_pred_rows, these_merging_pred_rows = _find_predecessors(
storm_to_events_table=storm_to_tornadoes_table,
target_row=this_main_object_row)
storm_to_tornadoes_table[MERGING_PRED_FLAG_COLUMN].values[
these_merging_pred_rows
] = True
for j in these_simple_pred_rows:
this_flag = (
storm_to_tornadoes_table[
tracking_utils.VALID_TIME_COLUMN].values[j]
> tornado_to_storm_table[EVENT_TIME_COLUMN].values[k]
)
if this_flag:
continue
storm_to_tornadoes_table[EVENT_LATITUDES_COLUMN].values[j].append(
tornado_to_storm_table[EVENT_LATITUDE_COLUMN].values[k]
)
storm_to_tornadoes_table[EVENT_LONGITUDES_COLUMN].values[j].append(
tornado_to_storm_table[EVENT_LONGITUDE_COLUMN].values[k]
)
storm_to_tornadoes_table[FUJITA_RATINGS_COLUMN].values[j].append(
tornado_to_storm_table[
tornado_io.FUJITA_RATING_COLUMN].values[k]
)
storm_to_tornadoes_table[TORNADO_IDS_COLUMN].values[j].append(
tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values[k]
)
storm_to_tornadoes_table[LINKAGE_DISTANCES_COLUMN].values[j].append(
tornado_to_storm_table[LINKAGE_DISTANCE_COLUMN].values[k]
)
this_relative_time_sec = (
tornado_to_storm_table[EVENT_TIME_COLUMN].values[k] -
storm_to_tornadoes_table[
tracking_utils.VALID_TIME_COLUMN].values[j]
)
storm_to_tornadoes_table[RELATIVE_EVENT_TIMES_COLUMN].values[
j].append(this_relative_time_sec)
storm_to_tornadoes_table[MAIN_OBJECT_FLAGS_COLUMN].values[j].append(
j == this_main_object_row
)
for i in range(num_storm_objects):
storm_to_tornadoes_table[EVENT_LATITUDES_COLUMN].values[i] = (
numpy.array(
storm_to_tornadoes_table[EVENT_LATITUDES_COLUMN].values[i]
)
)
storm_to_tornadoes_table[EVENT_LONGITUDES_COLUMN].values[i] = (
numpy.array(
storm_to_tornadoes_table[EVENT_LONGITUDES_COLUMN].values[i]
)
)
storm_to_tornadoes_table[LINKAGE_DISTANCES_COLUMN].values[i] = (
numpy.array(
storm_to_tornadoes_table[LINKAGE_DISTANCES_COLUMN].values[i]
)
)
storm_to_tornadoes_table[RELATIVE_EVENT_TIMES_COLUMN].values[i] = (
numpy.array(
storm_to_tornadoes_table[RELATIVE_EVENT_TIMES_COLUMN].values[i],
dtype=int)
)
storm_to_tornadoes_table[MAIN_OBJECT_FLAGS_COLUMN].values[i] = (
numpy.array(
storm_to_tornadoes_table[MAIN_OBJECT_FLAGS_COLUMN].values[i],
dtype=bool)
)
return storm_to_tornadoes_table
def _remove_storms_near_start_of_period(
storm_object_table,
min_time_elapsed_sec=temporal_tracking.DEFAULT_MIN_VELOCITY_TIME_SEC):
"""Removes any storm object near the start of a tracking period.
This is because velocity estimates near the start of a tracking period are
lower-quality, which may cause erroneous linkages.
:param storm_object_table: pandas DataFrame created by
`_read_input_storm_tracks`. Each row is one storm object.
:param min_time_elapsed_sec: Minimum time into tracking period. Any storm
object occurring < `min_time_elapsed_sec` into a tracking period will be
removed.
:return: storm_object_table: Same as input but maybe with fewer rows.
"""
times_after_start_sec = (
storm_object_table[tracking_utils.VALID_TIME_COLUMN].values -
storm_object_table[tracking_utils.TRACKING_START_TIME_COLUMN].values
)
bad_indices = numpy.where(times_after_start_sec < min_time_elapsed_sec)[0]
print((
'{0:d} of {1:d} storm objects occur within {2:d} seconds of beginning '
'of tracking period. REMOVING.'
).format(
len(bad_indices), len(storm_object_table.index), min_time_elapsed_sec
))
return storm_object_table.drop(
storm_object_table.index[bad_indices], axis=0, inplace=False
)
def _read_input_storm_tracks(tracking_file_names):
"""Reads storm tracks (input to linkage algorithm).
:param tracking_file_names: 1-D list of paths to storm-tracking files
(readable by `storm_tracking_io.read_file`).
:return: storm_object_table: pandas DataFrame with the following columns.
Each row is one storm object.
storm_object_table.primary_id_string: Primary ID for storm cell.
storm_object_table.secondary_id_string: Secondary ID for storm cell.
storm_object_table.full_id_string: Full ID for storm cell.
storm_object_table.unix_time_sec: Valid time.
storm_object_table.centroid_lat_deg: Latitude (deg N) of storm centroid.
storm_object_table.centroid_lng_deg: Longitude (deg E) of storm centroid.
storm_object_table.cell_start_time_unix_sec: First time in corresponding
storm cell.
storm_object_table.cell_end_time_unix_sec: Last time in corresponding storm
cell.
storm_object_table.tracking_start_time_unix_sec: Start of tracking period.
storm_object_table.tracking_end_time_unix_sec: End of tracking period.
storm_object_table.polygon_object_latlng: `shapely.geometry.Polygon`
object with storm outline in lat-long coordinates.
"""
list_of_storm_object_tables = []
for this_file_name in tracking_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_storm_object_table = tracking_io.read_file(this_file_name)[
REQUIRED_STORM_COLUMNS
]
list_of_storm_object_tables.append(this_storm_object_table)
if len(list_of_storm_object_tables) == 1:
continue
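        # Align the newest table's columns with those of the first table, so
        # that all tables share the same column set and order before they are
        # stacked by `pandas.concat` below.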
list_of_storm_object_tables[-1] = list_of_storm_object_tables[-1].align(
list_of_storm_object_tables[0], axis=1
)[0]
storm_object_table = pandas.concat(
list_of_storm_object_tables, axis=0, ignore_index=True)
return _remove_storms_near_start_of_period(
storm_object_table=storm_object_table)
def _read_input_wind_observations(
top_directory_name, storm_times_unix_sec,
max_time_before_storm_start_sec, max_time_after_storm_end_sec):
"""Reads wind observations (input to linkage algorithm).
:param top_directory_name: Name of top-level directory. Files therein will
be found by `raw_wind_io.find_processed_hourly_files` and read by
`raw_wind_io.read_processed_file`.
:param storm_times_unix_sec: 1-D numpy array with valid times of storm
objects.
:param max_time_before_storm_start_sec: See doc for `_check_input_args`.
:param max_time_after_storm_end_sec: Same.
:return: wind_table: pandas DataFrame with the following columns.
wind_table.station_id: String ID for station.
wind_table.unix_time_sec: Valid time.
wind_table.latitude_deg: Latitude (deg N).
wind_table.longitude_deg: Longitude (deg E).
wind_table.u_wind_m_s01: u-wind (metres per second).
wind_table.v_wind_m_s01: v-wind (metres per second).
"""
min_wind_time_unix_sec = numpy.min(
storm_times_unix_sec) - max_time_before_storm_start_sec
max_wind_time_unix_sec = numpy.max(
storm_times_unix_sec) + max_time_after_storm_end_sec
wind_file_names, _ = raw_wind_io.find_processed_hourly_files(
start_time_unix_sec=min_wind_time_unix_sec,
end_time_unix_sec=max_wind_time_unix_sec,
primary_source=raw_wind_io.MERGED_DATA_SOURCE,
top_directory_name=top_directory_name, raise_error_if_missing=True)
list_of_wind_tables = []
for this_file_name in wind_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
list_of_wind_tables.append(
raw_wind_io.read_processed_file(this_file_name)[
REQUIRED_WIND_COLUMNS]
)
if len(list_of_wind_tables) == 1:
continue
list_of_wind_tables[-1] = list_of_wind_tables[-1].align(
list_of_wind_tables[0], axis=1
)[0]
wind_table = pandas.concat(list_of_wind_tables, axis=0, ignore_index=True)
wind_speeds_m_s01 = numpy.sqrt(
wind_table[raw_wind_io.U_WIND_COLUMN].values ** 2 +
wind_table[raw_wind_io.V_WIND_COLUMN].values ** 2
)
bad_indices = raw_wind_io.check_wind_speeds(
wind_speeds_m_s01=wind_speeds_m_s01, one_component=False)
wind_table.drop(wind_table.index[bad_indices], axis=0, inplace=True)
column_dict_old_to_new = {
raw_wind_io.TIME_COLUMN: EVENT_TIME_COLUMN,
raw_wind_io.LATITUDE_COLUMN: EVENT_LATITUDE_COLUMN,
raw_wind_io.LONGITUDE_COLUMN: EVENT_LONGITUDE_COLUMN
}
wind_table.rename(columns=column_dict_old_to_new, inplace=True)
return wind_table
def _read_input_tornado_reports(
input_directory_name, storm_times_unix_sec,
max_time_before_storm_start_sec, max_time_after_storm_end_sec,
genesis_only=True, interp_time_interval_sec=None):
"""Reads tornado observations (input to linkage algorithm).
:param input_directory_name: Name of directory with tornado observations.
Relevant files will be found by `tornado_io.find_processed_file` and
read by `tornado_io.read_processed_file`.
:param storm_times_unix_sec: 1-D numpy array with valid times of storm
objects.
:param max_time_before_storm_start_sec: See doc for `_check_input_args`.
:param max_time_after_storm_end_sec: Same.
:param genesis_only: Boolean flag. If True, will return tornadogenesis
points only. If False, will return all points along each tornado track.
:param interp_time_interval_sec: [used only if `genesis_only == False`]
Time resolution for interpolating tornado location between start and end
points.
:return: tornado_table: pandas DataFrame with the following columns.
tornado_table.unix_time_sec: Valid time.
tornado_table.latitude_deg: Latitude (deg N).
tornado_table.longitude_deg: Longitude (deg E).
tornado_table.tornado_id_string: Tornado ID.
tornado_table.f_or_ef_rating: F-scale or EF-scale rating (string).
"""
# TODO(thunderhoser): Put most of this logic in tornado_io.py.
error_checking.assert_is_boolean(genesis_only)
if not genesis_only:
error_checking.assert_is_integer(interp_time_interval_sec)
error_checking.assert_is_greater(interp_time_interval_sec, 0)
min_tornado_time_unix_sec = (
numpy.min(storm_times_unix_sec) - max_time_before_storm_start_sec
)
max_tornado_time_unix_sec = (
numpy.max(storm_times_unix_sec) + max_time_after_storm_end_sec
)
min_tornado_year = int(time_conversion.unix_sec_to_string(
min_tornado_time_unix_sec, YEAR_FORMAT
))
max_tornado_year = int(time_conversion.unix_sec_to_string(
max_tornado_time_unix_sec, YEAR_FORMAT
))
tornado_years = numpy.linspace(
min_tornado_year, max_tornado_year,
num=max_tornado_year - min_tornado_year + 1, dtype=int
)
list_of_tornado_tables = []
for this_year in tornado_years:
this_file_name = tornado_io.find_processed_file(
directory_name=input_directory_name, year=this_year)
print('Reading data from: "{0:s}"...'.format(this_file_name))
list_of_tornado_tables.append(
tornado_io.read_processed_file(this_file_name)
)
if len(list_of_tornado_tables) == 1:
continue
list_of_tornado_tables[-1] = list_of_tornado_tables[-1].align(
list_of_tornado_tables[0], axis=1
)[0]
tornado_table = pandas.concat(
list_of_tornado_tables, axis=0, ignore_index=True)
if genesis_only:
invalid_flags = numpy.invert(numpy.logical_and(
tornado_table[tornado_io.START_TIME_COLUMN].values >=
min_tornado_time_unix_sec,
tornado_table[tornado_io.START_TIME_COLUMN].values <=
max_tornado_time_unix_sec
))
invalid_rows = numpy.where(invalid_flags)[0]
tornado_table.drop(
tornado_table.index[invalid_rows], axis=0, inplace=True
)
tornado_table = tornado_io.add_tornado_ids_to_table(tornado_table)
column_dict_old_to_new = {
tornado_io.START_TIME_COLUMN: EVENT_TIME_COLUMN,
tornado_io.START_LAT_COLUMN: EVENT_LATITUDE_COLUMN,
tornado_io.START_LNG_COLUMN: EVENT_LONGITUDE_COLUMN
}
tornado_table.rename(columns=column_dict_old_to_new, inplace=True)
else:
tornado_table = tornado_io.subset_tornadoes(
tornado_table=tornado_table,
min_time_unix_sec=min_tornado_time_unix_sec,
max_time_unix_sec=max_tornado_time_unix_sec)
tornado_table = tornado_io.interp_tornadoes_along_tracks(
tornado_table=tornado_table,
interp_time_interval_sec=interp_time_interval_sec)
column_dict_old_to_new = {
tornado_io.TIME_COLUMN: EVENT_TIME_COLUMN,
tornado_io.LATITUDE_COLUMN: EVENT_LATITUDE_COLUMN,
tornado_io.LONGITUDE_COLUMN: EVENT_LONGITUDE_COLUMN
}
tornado_table.rename(columns=column_dict_old_to_new, inplace=True)
return tornado_table
def _remove_redundant_tornado_linkages(
early_tornado_to_storm_table, late_tornado_to_storm_table):
"""Removes redundant tornado-occurrence linkages over two periods.
:param early_tornado_to_storm_table: pandas DataFrame (created by
`_find_nearest_storms`) for early period.
:param late_tornado_to_storm_table: Same but for late period.
:return: early_tornado_to_storm_table: Same as input but without redundant
linkages (those found in `late_tornado_to_storm_table`).
:return: late_tornado_to_storm_table: Same as input but without redundant
linkages (those found in `early_tornado_to_storm_table`).
:raises: ValueError: if any tornado appears in both tables with different
start times.
"""
unique_tornado_id_strings = numpy.unique(numpy.concatenate((
early_tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values,
late_tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values
)))
for this_tornado_id_string in unique_tornado_id_strings:
these_early_rows = numpy.where(
early_tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values ==
this_tornado_id_string
)[0]
if len(these_early_rows) == 0:
continue
these_late_rows = numpy.where(
late_tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values ==
this_tornado_id_string
)[0]
if len(these_late_rows) == 0:
continue
this_subrow = numpy.argmin(
early_tornado_to_storm_table[EVENT_TIME_COLUMN].values[
these_early_rows]
)
this_early_row = these_early_rows[this_subrow]
this_subrow = numpy.argmin(
late_tornado_to_storm_table[EVENT_TIME_COLUMN].values[
these_late_rows]
)
this_late_row = these_late_rows[this_subrow]
this_early_start_unix_sec = early_tornado_to_storm_table[
EVENT_TIME_COLUMN].values[this_early_row]
this_late_start_unix_sec = late_tornado_to_storm_table[
EVENT_TIME_COLUMN].values[this_late_row]
if this_early_start_unix_sec != this_late_start_unix_sec:
error_string = (
'Tornado "{0:s}" appears in early table with start time {1:s} '
'and in late table with start time {2:s}. The two start times '
'should be the same.'
).format(
this_tornado_id_string,
time_conversion.unix_sec_to_string(
this_early_start_unix_sec, TIME_FORMAT),
time_conversion.unix_sec_to_string(
this_late_start_unix_sec, TIME_FORMAT)
)
raise ValueError(error_string)
this_early_sec_id_string = early_tornado_to_storm_table[
NEAREST_SECONDARY_ID_COLUMN].values[this_early_row]
if this_early_sec_id_string is None:
early_tornado_to_storm_table.drop(
early_tornado_to_storm_table.index[these_early_rows],
axis=0, inplace=True)
continue
this_late_sec_id_string = late_tornado_to_storm_table[
NEAREST_SECONDARY_ID_COLUMN].values[this_late_row]
if (this_late_sec_id_string is None and
this_early_sec_id_string is not None):
late_tornado_to_storm_table.drop(
late_tornado_to_storm_table.index[these_late_rows],
axis=0, inplace=True)
continue
this_early_distance_metres = early_tornado_to_storm_table[
LINKAGE_DISTANCE_COLUMN].values[this_early_row]
this_late_distance_metres = late_tornado_to_storm_table[
LINKAGE_DISTANCE_COLUMN].values[this_late_row]
if this_early_distance_metres <= this_late_distance_metres:
late_tornado_to_storm_table.drop(
late_tornado_to_storm_table.index[these_late_rows],
axis=0, inplace=True)
continue
early_tornado_to_storm_table.drop(
early_tornado_to_storm_table.index[these_early_rows],
axis=0, inplace=True)
return early_tornado_to_storm_table, late_tornado_to_storm_table
def _share_tornado_linkages(
early_tornado_to_storm_table, late_tornado_to_storm_table,
early_storm_object_table, late_storm_object_table,
max_time_before_storm_start_sec, max_time_after_storm_end_sec):
"""Shares tornado-occurrence linkages between two periods.
:param early_tornado_to_storm_table: pandas DataFrame (created by
`_find_nearest_storms`) for early period.
:param late_tornado_to_storm_table: Same but for late period.
:param early_storm_object_table: pandas DataFrame (created by
`_read_input_storm_tracks`) for early period.
:param late_storm_object_table: Same but for late period.
    :param max_time_before_storm_start_sec: See doc for `_check_input_args`.
:param max_time_after_storm_end_sec: Same.
:return: early_storm_to_tornadoes_table: pandas DataFrame (created by
`_reverse_tornado_linkages`) for early period.
:return: late_storm_to_tornadoes_table: Same but for late period.
"""
# Remove redundant tornado linkages.
orig_early_id_strings = numpy.unique(
early_tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values
)
orig_late_id_strings = numpy.unique(
late_tornado_to_storm_table[tornado_io.TORNADO_ID_COLUMN].values
)
early_tornado_to_storm_table, late_tornado_to_storm_table = (
_remove_redundant_tornado_linkages(
early_tornado_to_storm_table=early_tornado_to_storm_table,
late_tornado_to_storm_table=late_tornado_to_storm_table)
)
# Concatenate storm-object tables.
num_early_storm_objects = len(early_storm_object_table.index)
early_storm_object_table = early_storm_object_table.assign(**{
EARLY_FLAG_COLUMN: numpy.full(num_early_storm_objects, True, dtype=bool)
})
num_late_storm_objects = len(late_storm_object_table.index)
late_storm_object_table = late_storm_object_table.assign(**{
EARLY_FLAG_COLUMN: numpy.full(num_late_storm_objects, False, dtype=bool)
})
storm_object_table = pandas.concat(
[early_storm_object_table, late_storm_object_table],
axis=0, ignore_index=True)
storm_object_table.drop_duplicates(
subset=[tracking_utils.SECONDARY_ID_COLUMN,
tracking_utils.VALID_TIME_COLUMN],
keep='first', inplace=True
)
# Concatenate relevant parts of tornado tables.
these_flags = early_tornado_to_storm_table[
tornado_io.TORNADO_ID_COLUMN
].isin(orig_late_id_strings).values
relevant_early_rows = numpy.where(these_flags)[0]
these_flags = late_tornado_to_storm_table[
tornado_io.TORNADO_ID_COLUMN
].isin(orig_early_id_strings).values
relevant_late_rows = numpy.where(these_flags)[0]
tornado_to_storm_table = pandas.concat([
early_tornado_to_storm_table.iloc[relevant_early_rows],
late_tornado_to_storm_table.iloc[relevant_late_rows]
], axis=0, ignore_index=True)
# For each tornado with some linked track segments and some unlinked
# segments, try linking the unlinked segments.
unique_times_unix_sec, orig_to_unique_indices = numpy.unique(
tornado_to_storm_table[EVENT_TIME_COLUMN].values, return_inverse=True
)
num_unique_times = len(unique_times_unix_sec)
for i in range(num_unique_times):
event_unassigned_flags = numpy.array([
s is None
for s in tornado_to_storm_table[NEAREST_SECONDARY_ID_COLUMN].values
], dtype=bool)
these_flags = numpy.logical_and(
event_unassigned_flags,
tornado_to_storm_table[TORNADO_ASSIGNED_COLUMN].values
)
these_rows = numpy.where(numpy.logical_and(
orig_to_unique_indices == i, these_flags
))[0]
for j in these_rows:
tornado_to_storm_table = _link_tornado_to_new_storm(
storm_object_table=storm_object_table,
tornado_to_storm_table=tornado_to_storm_table, tornado_row=j,
max_time_before_storm_start_sec=
max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec)
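    # Row order was preserved by `pandas.concat` above, so the first
    # len(relevant_early_rows) rows of `tornado_to_storm_table` came from the
    # early table and the remaining rows from the late table.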
# Add new linkages to early and late linkage tables.
early_tornado_to_storm_table.iloc[relevant_early_rows] = (
tornado_to_storm_table.iloc[:len(relevant_early_rows)]
)
late_tornado_to_storm_table.iloc[relevant_late_rows] = (
tornado_to_storm_table.iloc[len(relevant_early_rows):]
)
# Reverse linkages (currently tornado -> storm, but we want storm ->
# tornadoes).
tornado_to_storm_table = pandas.concat(
[early_tornado_to_storm_table, late_tornado_to_storm_table],
axis=0, ignore_index=True)
storm_to_tornadoes_table = _reverse_tornado_linkages(
storm_object_table=storm_object_table,
tornado_to_storm_table=tornado_to_storm_table)
early_storm_to_tornadoes_table = storm_to_tornadoes_table.loc[
storm_to_tornadoes_table[EARLY_FLAG_COLUMN] == True
]
late_storm_to_tornadoes_table = storm_to_tornadoes_table.loc[
storm_to_tornadoes_table[EARLY_FLAG_COLUMN] == False
]
early_storm_to_tornadoes_table.drop(EARLY_FLAG_COLUMN, axis=1, inplace=True)
late_storm_to_tornadoes_table.drop(EARLY_FLAG_COLUMN, axis=1, inplace=True)
return early_storm_to_tornadoes_table, late_storm_to_tornadoes_table
def _share_linkages_with_predecessors(early_storm_to_events_table,
late_storm_to_events_table):
"""Shares events linked to each storm object with its predecessors.
This task is done by `_reverse_wind_linkages` and
`_reverse_tornado_linkages` but only for one period at a time. Use this
method to share linkages between successive periods. In other words, the
workflow should be as follows:
[1] Run `_reverse_wind_linkages` or `_reverse_tornado_linkages` on the early
period.
[2] Run `_reverse_wind_linkages` or `_reverse_tornado_linkages` on the late
period.
[3] Run this method to "fill the gap" between the two periods.
:param early_storm_to_events_table: pandas DataFrame created by
`_reverse_wind_linkages` or `_reverse_tornado_linkages`.
:param late_storm_to_events_table: Same.
:return: early_storm_to_events_table: Same as input but maybe with new
linkages.
:return: late_storm_to_events_table: Same as input but maybe with new
linkages. (Actually this should not change, but I have not yet verified
that.)
"""
storm_to_events_table = pandas.concat(
[early_storm_to_events_table, late_storm_to_events_table],
axis=0, ignore_index=True
)
    # Copy the column list before removing an element, so that the module-level
    # constants are not mutated when this method is called more than once.
    if TORNADO_IDS_COLUMN in storm_to_events_table:
        columns_to_change = list(TORNADO_LINKAGE_COLUMNS)
    else:
        columns_to_change = list(WIND_LINKAGE_COLUMNS)
    columns_to_change.remove(MERGING_PRED_FLAG_COLUMN)
num_early_storm_objects = len(early_storm_to_events_table.index)
num_late_storm_objects = len(late_storm_to_events_table.index)
num_storm_objects = num_early_storm_objects + num_late_storm_objects
for i in range(num_early_storm_objects, num_storm_objects):
these_main_object_flags = storm_to_events_table[
MAIN_OBJECT_FLAGS_COLUMN].values[i]
these_event_indices = numpy.where(these_main_object_flags)[0]
if len(these_event_indices) == 0:
continue
these_rows = temporal_tracking.find_predecessors(
storm_object_table=storm_to_events_table, target_row=i,
num_seconds_back=LARGE_INTEGER, max_num_sec_id_changes=0,
return_all_on_path=True)
storm_to_events_table[MERGING_PRED_FLAG_COLUMN].values[these_rows] = (
numpy.logical_or(
storm_to_events_table[MERGING_PRED_FLAG_COLUMN].values[
these_rows],
storm_to_events_table[MERGING_PRED_FLAG_COLUMN].values[i]
)
)
these_simple_pred_rows = _find_predecessors(
storm_to_events_table=storm_to_events_table, target_row=i
)[0]
these_event_times_unix_sec = (
storm_to_events_table[tracking_utils.VALID_TIME_COLUMN].values[i] +
storm_to_events_table[RELATIVE_EVENT_TIMES_COLUMN].values[i][
these_event_indices]
)
for j in these_simple_pred_rows:
if j == i:
continue
these_relative_times_sec = (
these_event_times_unix_sec -
storm_to_events_table[
tracking_utils.VALID_TIME_COLUMN].values[j]
)
these_main_object_flags = numpy.full(
len(these_event_indices), False, dtype=bool
)
for this_column in columns_to_change:
if this_column == RELATIVE_EVENT_TIMES_COLUMN:
storm_to_events_table[this_column].values[j] = (
numpy.concatenate((
storm_to_events_table[this_column].values[j],
these_relative_times_sec
))
)
elif this_column == MAIN_OBJECT_FLAGS_COLUMN:
storm_to_events_table[this_column].values[j] = (
numpy.concatenate((
storm_to_events_table[this_column].values[j],
these_main_object_flags
))
)
else:
if isinstance(storm_to_events_table[this_column].values[j],
numpy.ndarray):
storm_to_events_table[this_column].values[j] = (
numpy.concatenate((
storm_to_events_table[this_column].values[j],
storm_to_events_table[this_column].values[i][
these_event_indices]
))
)
else:
this_new_list = [
storm_to_events_table[this_column].values[i][k]
for k in these_event_indices
]
storm_to_events_table[this_column].values[j] = (
storm_to_events_table[this_column].values[j] +
this_new_list
)
early_rows = numpy.linspace(
0, num_early_storm_objects - 1, num=num_early_storm_objects, dtype=int)
late_rows = numpy.linspace(
num_early_storm_objects, num_storm_objects - 1,
num=num_late_storm_objects, dtype=int)
return (
storm_to_events_table.iloc[early_rows],
storm_to_events_table.iloc[late_rows]
)
def check_event_type(event_type_string):
"""Error-checks event type.
:param event_type_string: Event type.
:raises: ValueError: if `event_type_string not in VALID_EVENT_TYPE_STRINGS`.
"""
error_checking.assert_is_string(event_type_string)
if event_type_string not in VALID_EVENT_TYPE_STRINGS:
error_string = (
'\n{0:s}\nValid event types (listed above) do not include '
'"{1:s}".'
).format(str(VALID_EVENT_TYPE_STRINGS), event_type_string)
raise ValueError(error_string)
def link_storms_to_winds(
top_wind_directory_name, tracking_file_names,
max_time_before_storm_start_sec=DEFAULT_MAX_TIME_BEFORE_STORM_SEC,
max_time_after_storm_end_sec=DEFAULT_MAX_TIME_AFTER_STORM_SEC,
bounding_box_padding_metres=DEFAULT_BBOX_PADDING_METRES,
storm_interp_time_interval_sec=10,
max_link_distance_metres=DEFAULT_MAX_WIND_DISTANCE_METRES):
"""Links each storm to zero or more wind observations.
:param top_wind_directory_name: See doc for `_read_input_wind_observations`.
:param tracking_file_names: See doc for `_check_input_args`.
:param max_time_before_storm_start_sec: Same.
:param max_time_after_storm_end_sec: Same.
:param bounding_box_padding_metres: Same.
:param storm_interp_time_interval_sec: Same.
:param max_link_distance_metres: Same.
:return: storm_to_winds_table: pandas DataFrame created by
`_reverse_wind_linkages`.
:return: metadata_dict: Dictionary created by `_check_input_args`.
"""
metadata_dict = _check_input_args(
tracking_file_names=tracking_file_names,
max_time_before_storm_start_sec=max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec,
bounding_box_padding_metres=bounding_box_padding_metres,
storm_interp_time_interval_sec=storm_interp_time_interval_sec,
max_link_distance_metres=max_link_distance_metres)
storm_object_table = _read_input_storm_tracks(tracking_file_names)
print(SEPARATOR_STRING)
num_storm_objects = len(storm_object_table.index)
if num_storm_objects == 0:
these_times_unix_sec = numpy.array(
[tracking_io.file_name_to_time(f) for f in tracking_file_names],
dtype=int
)
these_times_unix_sec = numpy.array([
numpy.min(these_times_unix_sec), numpy.max(these_times_unix_sec)
], dtype=int)
else:
these_times_unix_sec = storm_object_table[
tracking_utils.VALID_TIME_COLUMN].values
wind_table = _read_input_wind_observations(
top_directory_name=top_wind_directory_name,
storm_times_unix_sec=these_times_unix_sec,
max_time_before_storm_start_sec=max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec)
print(SEPARATOR_STRING)
if num_storm_objects == 0:
num_wind_obs = len(wind_table.index)
wind_to_storm_table = wind_table.assign(**{
NEAREST_SECONDARY_ID_COLUMN: [None] * num_wind_obs,
LINKAGE_DISTANCE_COLUMN: numpy.full(num_wind_obs, numpy.nan),
NEAREST_TIME_COLUMN: numpy.full(num_wind_obs, -1, dtype=int)
})
else:
global_centroid_lat_deg, global_centroid_lng_deg = (
geodetic_utils.get_latlng_centroid(
latitudes_deg=storm_object_table[
tracking_utils.CENTROID_LATITUDE_COLUMN].values,
longitudes_deg=storm_object_table[
tracking_utils.CENTROID_LONGITUDE_COLUMN].values)
)
projection_object = projections.init_azimuthal_equidistant_projection(
central_latitude_deg=global_centroid_lat_deg,
central_longitude_deg=global_centroid_lng_deg)
storm_object_table = _project_storms_latlng_to_xy(
storm_object_table=storm_object_table,
projection_object=projection_object)
wind_table = _project_events_latlng_to_xy(
event_table=wind_table, projection_object=projection_object)
wind_x_limits_metres, wind_y_limits_metres = (
_get_bounding_box_for_storms(
storm_object_table=storm_object_table,
padding_metres=bounding_box_padding_metres)
)
wind_table = _filter_events_by_bounding_box(
event_table=wind_table, x_limits_metres=wind_x_limits_metres,
y_limits_metres=wind_y_limits_metres)
wind_to_storm_table = _find_nearest_storms(
storm_object_table=storm_object_table, event_table=wind_table,
max_time_before_storm_start_sec=max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec,
interp_time_interval_sec=storm_interp_time_interval_sec,
max_link_distance_metres=max_link_distance_metres,
event_type_string=WIND_EVENT_STRING)
print(SEPARATOR_STRING)
storm_to_winds_table = _reverse_wind_linkages(
storm_object_table=storm_object_table,
wind_to_storm_table=wind_to_storm_table)
return storm_to_winds_table, metadata_dict
def link_storms_to_tornadoes(
tornado_directory_name, tracking_file_names,
max_time_before_storm_start_sec=DEFAULT_MAX_TIME_BEFORE_STORM_SEC,
max_time_after_storm_end_sec=DEFAULT_MAX_TIME_AFTER_STORM_SEC,
bounding_box_padding_metres=DEFAULT_BBOX_PADDING_METRES,
storm_interp_time_interval_sec=1,
max_link_distance_metres=DEFAULT_MAX_TORNADO_DISTANCE_METRES,
genesis_only=True, tornado_interp_time_interval_sec=60):
"""Links each storm to zero or more tornadoes.
:param tornado_directory_name: See doc for `_read_input_tornado_reports`.
:param tracking_file_names: See doc for `_check_input_args`.
:param max_time_before_storm_start_sec: Same.
:param max_time_after_storm_end_sec: Same.
:param bounding_box_padding_metres: Same.
:param storm_interp_time_interval_sec: Same.
:param max_link_distance_metres: Same.
:param genesis_only: Boolean flag. If True, will link only tornadogenesis
events (the start point of each tornado). If False, will link all
tornado occurrences (K-second track segments, where
K = `tornado_interp_time_interval_sec`).
:param tornado_interp_time_interval_sec:
[used only if `genesis_only` == False]
Interpolation time used to create tornado-track segments. For each
tornado, will interpolate location between start and end time at this
interval.
:return: storm_to_tornadoes_table: pandas DataFrame created by
`_reverse_tornado_linkages`.
:return: tornado_to_storm_table: pandas DataFrame created by
`_find_nearest_storms`.
:return: metadata_dict: Dictionary created by `_check_input_args`.
"""
metadata_dict = _check_input_args(
tracking_file_names=tracking_file_names,
max_time_before_storm_start_sec=max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec,
bounding_box_padding_metres=bounding_box_padding_metres,
storm_interp_time_interval_sec=storm_interp_time_interval_sec,
max_link_distance_metres=max_link_distance_metres)
storm_object_table = _read_input_storm_tracks(tracking_file_names)
print(SEPARATOR_STRING)
num_storm_objects = len(storm_object_table.index)
if num_storm_objects == 0:
these_times_unix_sec = numpy.array(
[tracking_io.file_name_to_time(f) for f in tracking_file_names],
dtype=int
)
these_times_unix_sec = numpy.array([
numpy.min(these_times_unix_sec), numpy.max(these_times_unix_sec)
], dtype=int)
else:
these_times_unix_sec = storm_object_table[
tracking_utils.VALID_TIME_COLUMN].values
tornado_table = _read_input_tornado_reports(
input_directory_name=tornado_directory_name,
storm_times_unix_sec=these_times_unix_sec,
max_time_before_storm_start_sec=max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec,
genesis_only=genesis_only,
interp_time_interval_sec=tornado_interp_time_interval_sec)
print(SEPARATOR_STRING)
if num_storm_objects == 0:
num_tornadoes = len(tornado_table.index)
tornado_to_storm_table = tornado_table.assign(**{
NEAREST_SECONDARY_ID_COLUMN: [None] * num_tornadoes,
LINKAGE_DISTANCE_COLUMN: numpy.full(num_tornadoes, numpy.nan),
NEAREST_TIME_COLUMN: numpy.full(num_tornadoes, -1, dtype=int)
})
else:
global_centroid_lat_deg, global_centroid_lng_deg = (
geodetic_utils.get_latlng_centroid(
latitudes_deg=storm_object_table[
tracking_utils.CENTROID_LATITUDE_COLUMN].values,
longitudes_deg=storm_object_table[
tracking_utils.CENTROID_LONGITUDE_COLUMN].values)
)
projection_object = projections.init_azimuthal_equidistant_projection(
central_latitude_deg=global_centroid_lat_deg,
central_longitude_deg=global_centroid_lng_deg)
storm_object_table = _project_storms_latlng_to_xy(
storm_object_table=storm_object_table,
projection_object=projection_object)
tornado_table = _project_events_latlng_to_xy(
event_table=tornado_table, projection_object=projection_object)
tornado_x_limits_metres, tornado_y_limits_metres = (
_get_bounding_box_for_storms(
storm_object_table=storm_object_table,
padding_metres=bounding_box_padding_metres)
)
tornado_table = _filter_events_by_bounding_box(
event_table=tornado_table, x_limits_metres=tornado_x_limits_metres,
y_limits_metres=tornado_y_limits_metres)
event_type_string = (
TORNADOGENESIS_EVENT_STRING if genesis_only
else TORNADO_EVENT_STRING
)
tornado_to_storm_table = _find_nearest_storms(
storm_object_table=storm_object_table, event_table=tornado_table,
max_time_before_storm_start_sec=max_time_before_storm_start_sec,
max_time_after_storm_end_sec=max_time_after_storm_end_sec,
interp_time_interval_sec=storm_interp_time_interval_sec,
max_link_distance_metres=max_link_distance_metres,
event_type_string=event_type_string)
print(SEPARATOR_STRING)
storm_to_tornadoes_table = _reverse_tornado_linkages(
storm_object_table=storm_object_table,
tornado_to_storm_table=tornado_to_storm_table)
return storm_to_tornadoes_table, tornado_to_storm_table, metadata_dict
def share_linkages(
top_input_dir_name, top_output_dir_name, first_spc_date_string,
last_spc_date_string, event_type_string):
"""Shares linkages across SPC dates.
    This method stitches together results from `link_storms_to_winds` and
    `link_storms_to_tornadoes`, so that those methods can be run one day at a
    time and thus massively parallelized.
:param top_input_dir_name: Name of top-level input directory. Files therein
will be found by `find_linkage_file` and read by `read_linkage_file`.
    :param top_output_dir_name: Name of top-level output directory. Stitched
files will be written here by `write_linkage_file`, to exact locations
determined by `find_linkage_file`.
:param first_spc_date_string: First SPC date (format "yyyymmdd"). Results
will be stitched across SPC dates
`first_spc_date_string`...`last_spc_date_string`.
:param last_spc_date_string: See above.
:param event_type_string: Event type (must be accepted by
`check_event_type`).
"""
spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
num_spc_dates = len(spc_date_strings)
old_linkage_file_names = [''] * num_spc_dates
new_linkage_file_names = [''] * num_spc_dates
for i in range(num_spc_dates):
old_linkage_file_names[i] = find_linkage_file(
top_directory_name=top_input_dir_name,
event_type_string=event_type_string, raise_error_if_missing=True,
spc_date_string=spc_date_strings[i]
)
new_linkage_file_names[i] = find_linkage_file(
top_directory_name=top_output_dir_name,
event_type_string=event_type_string, raise_error_if_missing=False,
spc_date_string=spc_date_strings[i]
)
if num_spc_dates == 1:
warning_string = (
'There is only one SPC date ("{0:s}"), so cannot share linkages '
'across SPC dates.'
).format(spc_date_strings[0])
warnings.warn(warning_string)
if top_input_dir_name == top_output_dir_name:
return
print('Copying file from "{0:s}" to "{1:s}"...'.format(
old_linkage_file_names[0], new_linkage_file_names[0]
))
file_system_utils.mkdir_recursive_if_necessary(
file_name=new_linkage_file_names[0]
)
shutil.copyfile(old_linkage_file_names[0], new_linkage_file_names[0])
return
metadata_dict = None
storm_to_events_table_by_date = [pandas.DataFrame()] * num_spc_dates
tornado_to_storm_table_by_date = [pandas.DataFrame()] * num_spc_dates
for i in range(num_spc_dates):
if i == num_spc_dates - 1:
for j in [num_spc_dates - 2, num_spc_dates - 1]:
print('Writing new linkages to: "{0:s}"...'.format(
new_linkage_file_names[j]
))
write_linkage_file(
pickle_file_name=new_linkage_file_names[j],
storm_to_events_table=storm_to_events_table_by_date[j],
metadata_dict=metadata_dict,
tornado_to_storm_table=tornado_to_storm_table_by_date[j]
)
break
if i >= 1:
print('Writing new linkages to: "{0:s}"...'.format(
new_linkage_file_names[i - 1]
))
write_linkage_file(
pickle_file_name=new_linkage_file_names[i - 1],
storm_to_events_table=storm_to_events_table_by_date[i - 1],
metadata_dict=metadata_dict,
tornado_to_storm_table=tornado_to_storm_table_by_date[i - 1]
)
storm_to_events_table_by_date[i - 2] = pandas.DataFrame()
# for j in [i - 1, i, i + 1]:
for j in [i, i + 1]:
if j < 0 or j >= num_spc_dates:
continue
if not storm_to_events_table_by_date[j].empty:
continue
print('Reading original linkages from: "{0:s}"...'.format(
old_linkage_file_names[j]
))
(storm_to_events_table_by_date[j], metadata_dict,
tornado_to_storm_table_by_date[j]
) = read_linkage_file(old_linkage_file_names[j])
if event_type_string == TORNADO_EVENT_STRING:
(storm_to_events_table_by_date[i],
storm_to_events_table_by_date[i + 1]
) = _share_tornado_linkages(
early_tornado_to_storm_table=tornado_to_storm_table_by_date[i],
late_tornado_to_storm_table=
tornado_to_storm_table_by_date[i + 1],
early_storm_object_table=storm_to_events_table_by_date[i],
late_storm_object_table=storm_to_events_table_by_date[i + 1],
max_time_before_storm_start_sec=metadata_dict[
MAX_TIME_BEFORE_START_KEY],
max_time_after_storm_end_sec=metadata_dict[
MAX_TIME_AFTER_END_KEY]
)
(storm_to_events_table_by_date[i],
storm_to_events_table_by_date[i + 1]
) = _share_linkages_with_predecessors(
early_storm_to_events_table=storm_to_events_table_by_date[i],
late_storm_to_events_table=storm_to_events_table_by_date[i + 1]
)
print(SEPARATOR_STRING)
def find_linkage_file(top_directory_name, event_type_string, spc_date_string,
unix_time_sec=None, raise_error_if_missing=True):
"""Finds linkage file for either one time or one SPC date.
:param top_directory_name: Name of top-level directory with linkage files.
:param event_type_string: Event type (must be accepted by
`check_event_type`).
:param spc_date_string: SPC date (format "yyyymmdd").
:param unix_time_sec: Valid time. If this is None, will look for one-day
file rather than one-time-step file.
:param raise_error_if_missing: Boolean flag. If file is missing and
`raise_error_if_missing = True`, this method will error out.
:return: linkage_file_name: Path to linkage file. If file is missing and
`raise_error_if_missing = False`, this will be the *expected* path.
:raises: ValueError: if file is missing and `raise_error_if_missing = True`.
"""
error_checking.assert_is_string(top_directory_name)
check_event_type(event_type_string)
error_checking.assert_is_boolean(raise_error_if_missing)
if event_type_string == WIND_EVENT_STRING:
file_name_prefix = 'storm_to_winds'
elif event_type_string == TORNADOGENESIS_EVENT_STRING:
file_name_prefix = 'storm_to_tornadogenesis'
else:
file_name_prefix = 'storm_to_tornadoes'
if unix_time_sec is None:
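        # Called only to validate the SPC-date string; the return value is not
        # needed.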
time_conversion.spc_date_string_to_unix_sec(spc_date_string)
linkage_file_name = '{0:s}/{1:s}/{2:s}_{3:s}.p'.format(
top_directory_name, spc_date_string[:4], file_name_prefix,
spc_date_string
)
else:
spc_date_string = time_conversion.time_to_spc_date_string(unix_time_sec)
valid_time_string = time_conversion.unix_sec_to_string(
unix_time_sec, TIME_FORMAT)
linkage_file_name = '{0:s}/{1:s}/{2:s}/{3:s}_{4:s}.p'.format(
top_directory_name, spc_date_string[:4], spc_date_string,
file_name_prefix, valid_time_string
)
if raise_error_if_missing and not os.path.isfile(linkage_file_name):
error_string = 'Cannot find file. Expected at: "{0:s}"'.format(
linkage_file_name)
raise ValueError(error_string)
return linkage_file_name
def write_linkage_file(pickle_file_name, storm_to_events_table, metadata_dict,
tornado_to_storm_table=None):
"""Writes linkages to Pickle file.
    The input arg `tornado_to_storm_table` is used only if the event type is
    tornado occurrence (not genesis). Even then, it can be left as None.
:param pickle_file_name: Path to output file.
:param storm_to_events_table: pandas DataFrame created by
`_reverse_wind_linkages` or `_reverse_tornado_linkages`.
:param metadata_dict: Dictionary created by `_check_input_args`.
:param tornado_to_storm_table: pandas DataFrame created by
        `_find_nearest_storms`. This is used to share linkages across SPC
        dates (see method `share_linkages`).
"""
try:
error_checking.assert_columns_in_dataframe(
storm_to_events_table, REQUIRED_WIND_LINKAGE_COLUMNS)
except:
error_checking.assert_columns_in_dataframe(
storm_to_events_table, REQUIRED_TORNADO_LINKAGE_COLUMNS)
file_system_utils.mkdir_recursive_if_necessary(file_name=pickle_file_name)
pickle_file_handle = open(pickle_file_name, 'wb')
pickle.dump(storm_to_events_table, pickle_file_handle)
pickle.dump(metadata_dict, pickle_file_handle)
pickle.dump(tornado_to_storm_table, pickle_file_handle)
pickle_file_handle.close()
def read_linkage_file(pickle_file_name):
"""Reads linkages from Pickle file.
:param pickle_file_name: Path to input file.
:return: storm_to_events_table: See doc for `write_linkage_file`.
:return: metadata_dict: Same.
:return: tornado_to_storm_table: Same.
"""
pickle_file_handle = open(pickle_file_name, 'rb')
storm_to_events_table = pickle.load(pickle_file_handle)
if MERGING_PRED_FLAG_COLUMN not in list(storm_to_events_table):
num_storm_objects = len(storm_to_events_table.index)
storm_to_events_table = storm_to_events_table.assign(**{
MERGING_PRED_FLAG_COLUMN:
numpy.full(num_storm_objects, False, dtype=bool)
})
try:
metadata_dict = pickle.load(pickle_file_handle)
tornado_to_storm_table = pickle.load(pickle_file_handle)
except EOFError:
metadata_dict = None
tornado_to_storm_table = None
pickle_file_handle.close()
    # If the file predates metadata storage, reconstruct default metadata
    # instead of overwriting anything that was read from the file.
    if metadata_dict is None:
        try:
            error_checking.assert_columns_in_dataframe(
                storm_to_events_table, REQUIRED_WIND_LINKAGE_COLUMNS)
            tornado = False
        except:
            error_checking.assert_columns_in_dataframe(
                storm_to_events_table, REQUIRED_TORNADO_LINKAGE_COLUMNS)
            tornado = True
        max_link_distance_metres = (
            DEFAULT_MAX_TORNADO_DISTANCE_METRES if tornado
            else DEFAULT_MAX_WIND_DISTANCE_METRES
        )
        metadata_dict = {
            MAX_TIME_BEFORE_START_KEY: DEFAULT_MAX_TIME_BEFORE_STORM_SEC,
            MAX_TIME_AFTER_END_KEY: DEFAULT_MAX_TIME_AFTER_STORM_SEC,
            STORM_INTERP_TIME_KEY: 1 if tornado else 10,
            BBOX_PADDING_KEY: DEFAULT_BBOX_PADDING_METRES,
            MAX_LINK_DISTANCE_KEY: max_link_distance_metres
        }
    return storm_to_events_table, metadata_dict, tornado_to_storm_table
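# Illustrative round trip (not part of the original module), assuming
# `storm_to_events_table` and `metadata_dict` were built by the helpers named
# in the docstrings above:
#
#     write_linkage_file('/tmp/storm_to_winds_20110401.p',
#                        storm_to_events_table, metadata_dict)
#     table, meta, tornado_table = read_linkage_file(
#         '/tmp/storm_to_winds_20110401.p')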
|
mit
|
Carralex/landlab
|
landlab/ca/examples/rock_weathering.py
|
6
|
5506
|
#!/usr/env/python
"""
rock_weathering.py
CellLab-CTS model that simulates the weathering of rock to saprolite around
a network of fractures.
Created (and translated from earlier code) by Greg Tucker, Jul 2015
"""
from __future__ import print_function
import time
import numpy as np
from landlab import RasterModelGrid
from landlab.ca.celllab_cts import Transition, CAPlotter
from landlab.ca.raster_cts import RasterCTS
from landlab.components.fracture_grid.fracture_grid import make_frac_grid
import matplotlib
from landlab.io.netcdf import write_netcdf
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent the
grain-by-grain transformation of bedrock to saprolite.
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
Weathering here is treated very simply: a bedrock particle adjacent to a
saprolite particle has a specified probability (rate) of weathering to
saprolite; in other words, a rock-saprolite pair can turn into a
saprolite-saprolite pair.
The states and transitions are as follows:
    Pair state      Transition to       Process     Rate (cells/s)
    ==========      =============       =======     ==============
    0 (0-0)         1 (0-1)                         0.5
                    2 (1-0)                         0.5
    1 (0-1)         3 (1-1)                         1.0
    2 (1-0)         3 (1-1)                         1.0
    3 (1-1)         (none)                          -
"""
# Create an empty transition list
xn_list = []
# Append two transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left/bottom cell, right/top cell, orientation)
# - Tuple representing new pair state
# (left/bottom cell, right/top cell, orientation)
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append( Transition((0,1,0), (1,1,0), 1., 'weathering') )
xn_list.append( Transition((1,0,0), (1,1,0), 1., 'weathering') )
return xn_list
def main():
# INITIALIZE
# User-defined parameters
nr = 200 # number of rows in grid
nc = 200 # number of columns in grid
plot_interval = 0.05 # time interval for plotting (unscaled)
run_duration = 5.0 # duration of run (unscaled)
report_interval = 10.0 # report interval, in real-time seconds
frac_spacing = 10 # average fracture spacing, nodes
outfilename = 'wx' # name for netCDF files
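    # With these settings the loop below runs roughly run_duration /
    # plot_interval = 5.0 / 0.05 = 100 output steps, so about 100 netCDF
    # frames are written in addition to the initial wx0.nc.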
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
# Counter for output files
time_slice = 0
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'rock', 1 : 'saprolite' }
xn_list = setup_transition_list()
# Create the node-state array and attach it to the grid.
# (Note use of numpy's uint8 data type. This saves memory AND allows us
# to write output to a netCDF3 file; netCDF3 does not handle the default
# 64-bit integer type)
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=np.uint8)
node_state_grid[:] = make_frac_grid(frac_spacing, model_grid=mg)
# Create the CA model
ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid)
# Set up the color map
rock_color = (0.8, 0.8, 0.8)
sap_color = (0.4, 0.2, 0)
clist = [rock_color, sap_color]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca, cmap=my_cmap)
# Plot the initial grid
ca_plotter.update_plot()
# Output the initial grid to file
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
print('Current sim time', current_time, '(',
100 * current_time/run_duration, '%)')
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state,
plot_each_transition=False)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
# Output the current grid to a netCDF file
time_slice += 1
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# FINALIZE
# Plot
ca_plotter.finalize()
# If user runs this file, activate the main() function
if __name__ == "__main__":
main()
|
mit
|
semonte/intellij-community
|
python/helpers/pydev/pydev_ipython/matplotlibtools.py
|
10
|
5496
|
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt4'
backend2gui['Qt5Agg'] = 'qt5'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
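# For example, given the mappings above (illustrative comment, not in the
# original module):
#     backend2gui['TkAgg']  -> 'tk'
#     backend2gui['GTKAgg'] -> 'gtk'   (as do 'GTK' and 'GTKCairo')
#     backend2gui['WXAgg']  -> 'wx'    (as does plain 'WX')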
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
setattr(matplotlib, "real_use", getattr(matplotlib, "use"))
setattr(matplotlib, "use", patched_use)
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
setattr(matplotlib, "real_is_interactive", getattr(matplotlib, "is_interactive"))
setattr(matplotlib, "is_interactive", patched_is_interactive)
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
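# Illustrative sketch (not part of the original module) of how flag_calls()
# is meant to be used, mirroring activate_pylab()/activate_pyplot() below:
#
#     def draw_if_interactive():
#         pass
#     draw_if_interactive = flag_calls(draw_if_interactive)
#     draw_if_interactive()
#     assert draw_if_interactive.called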
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
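# Illustrative wiring (an assumption, not taken from this file): a debug
# console would typically call these helpers roughly as follows once user
# code imports matplotlib:
#
#     activate_matplotlib(do_enable_gui)
#     activate_pylab()    # only if the 'pylab' module has been imported
#     activate_pyplot()   # only if 'matplotlib.pyplot' has been imported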
|
apache-2.0
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/matplotlib/tri/tricontour.py
|
10
|
10588
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.contour import ContourSet
from matplotlib.tri.triangulation import Triangulation
import matplotlib._tri as _tri
import numpy as np
class TriContourSet(ContourSet):
"""
Create and store a set of contour lines or filled regions for
a triangular grid.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw triangular grid contour lines or filled regions,
depending on whether keyword arg 'filled' is False
(default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in TriContourSet.tricontour_doc.
"""
ContourSet.__init__(self, ax, *args, **kwargs)
def _process_args(self, *args, **kwargs):
"""
Process args and kwargs.
"""
if isinstance(args[0], TriContourSet):
C = args[0].cppContourGenerator
if self.levels is None:
self.levels = args[0].levels
else:
tri, z = self._contour_args(args, kwargs)
C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)
x0 = tri.x.min()
x1 = tri.x.max()
y0 = tri.y.min()
y1 = tri.y.max()
self.ax.update_datalim([(x0, y0), (x1, y1)])
self.ax.autoscale_view()
self.cppContourGenerator = C
def _get_allsegs_and_allkinds(self):
"""
Create and return allsegs and allkinds by calling underlying C code.
"""
allsegs = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
allkinds = []
for lower, upper in zip(lowers, uppers):
segs, kinds = self.cppContourGenerator.create_filled_contour(
lower, upper)
allsegs.append([segs])
allkinds.append([kinds])
else:
allkinds = None
for level in self.levels:
segs = self.cppContourGenerator.create_contour(level)
allsegs.append(segs)
return allsegs, allkinds
def _contour_args(self, args, kwargs):
if self.filled:
fn = 'contourf'
else:
fn = 'contour'
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args,
**kwargs)
z = np.asarray(args[0])
if z.shape != tri.x.shape:
            raise ValueError('z array must have same length as triangulation '
                             'x and y arrays')
self.zmax = z.max()
self.zmin = z.min()
if self.logscale and self.zmin <= 0:
raise ValueError('Cannot %s log of negative values.' % fn)
self._contour_level_args(z, args[1:])
return (tri, z)
tricontour_doc = """
Draw contours on an unstructured triangular grid.
:func:`~matplotlib.pyplot.tricontour` and
:func:`~matplotlib.pyplot.tricontourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
The triangulation can be specified in one of two ways; either::
tricontour(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
tricontour(x, y, ...)
tricontour(x, y, triangles, ...)
tricontour(x, y, triangles=triangles, ...)
tricontour(x, y, mask=mask, ...)
tricontour(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of
these possibilities.
The remaining arguments may be::
tricontour(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation. The level values are chosen
automatically.
::
tricontour(..., Z, N)
contour *N* automatically-chosen levels.
::
tricontour(..., Z, V)
draw contour lines at the values specified in sequence *V*
::
tricontourf(..., Z, V)
fill the (len(*V*)-1) regions between the values in *V*
::
tricontour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
``C = tricontour(...)`` returns a
:class:`~matplotlib.contour.TriContourSet` object.
Optional keyword arguments:
*colors*: [ *None* | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ *None* | Colormap ]
A cm :class:`~matplotlib.colors.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*levels* [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw; e.g., to draw just the zero contour pass
``levels=[0]``
*origin*: [ *None* | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ *None* | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ *None* | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.colors.Colormap.set_under` and
:meth:`matplotlib.colors.Colormap.set_over` methods.
*xunits*, *yunits*: [ *None* | registered units ]
Override axis units by specifying an instance of a
:class:`matplotlib.units.ConversionInterface`.
tricontour-only keyword arguments:
*linewidths*: [ *None* | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
      If *linestyles* is *None*, the default 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
tricontourf-only keyword arguments:
*antialiased*: [ *True* | *False* ]
enable antialiasing
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
Note: tricontourf fills intervals that are closed at the top; that
is, for boundaries *z1* and *z2*, the filled region is::
z1 < z <= z2
There is one exception: if the lowest boundary coincides with
the minimum value of the *z* array, then that minimum value
will be included in the lowest interval.
**Examples:**
.. plot:: mpl_examples/pylab_examples/tricontour_demo.py
"""
def tricontour(ax, *args, **kwargs):
if not ax._hold:
ax.cla()
kwargs['filled'] = False
return TriContourSet(ax, *args, **kwargs)
tricontour.__doc__ = TriContourSet.tricontour_doc
def tricontourf(ax, *args, **kwargs):
if not ax._hold:
ax.cla()
kwargs['filled'] = True
return TriContourSet(ax, *args, **kwargs)
tricontourf.__doc__ = TriContourSet.tricontour_doc
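# Illustrative use (not part of the original module), assuming scattered
# points x, y with values z as described in tricontour_doc above:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x, y = np.random.rand(2, 100)
#     z = np.sin(3 * x) * np.cos(3 * y)
#     fig, ax = plt.subplots()
#     tricontourf(ax, x, y, z, 10)   # 10 automatically chosen filled levels
#     plt.show()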
|
mit
|
pandastrail/InfoEng
|
scripting/exercises/p04_1_2.py
|
1
|
2308
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 22:23:39 2017
@author: hase
1.2 Trail Length
Write a program that computes the total length of a trail tracked
with your GPS device. Let’s assume your GPS device saves your
coordinates every 5 second into a list of tuples.
For the trail in the right image above, the list of tuples would look like this:
trail = [(142.492, 208.536),
(142.658, 207.060),
(143.522, 205.978),
(145.009, 205.546)]
Create a function pathlength(trail) for computing L according to the formula
above (see pdf). The argument trail should be a list of (x,y) coordinate tuples.
Test the function on a triangular path with the four points
(1, 1), (2, 1), (1, 2), and (1, 1).
Hint: To compute the square root of some value x, use import math
math.sqrt(x)
distance = sqrt((x2-x1)**2 + (y2-y1)**2)
Or use scipy.spatial.distance.cdist? or pdist?
"""
# Modules
import numpy as np
import matplotlib.pyplot as plt
import math
# Assignments
x_point = []
y_point = []
d = []
'''
xy_points = ({1: {'x': 1, 'y': 1},
2: {'x': 2, 'y': 1},
3: {'x': 1, 'y': 2},
4: {'x': 1, 'y': 1},
})
'''
xy_points = ({1: {'x':142.492, 'y': 208.536},
2: {'x':142.658, 'y': 207.060},
3: {'x':143.522, 'y': 205.978},
4: {'x':145.009, 'y': 205.546},
5: {'x':146.492, 'y': 204.536},
6: {'x':147.658, 'y': 203.060},
7: {'x':148.522, 'y': 202.978},
8: {'x':149.009, 'y': 201.546},
})
#print(xy_points)
# Total distance for the commented-out triangular path: 2 + sqrt(2) ~= 3.41
for i in range(1, len(xy_points)+1):
xy_point = xy_points[i]
#print(xy_point)
x_point.append(xy_point['x'])
y_point.append(xy_point['y'])
for i in range(1,len(xy_points),1):
print('i: ', i)
x = (xy_points[i+1]['x'] - xy_points[i]['x'])**2
print(xy_points[i+1]['x'], '-', xy_points[i]['x'], '**2')
y = (xy_points[i+1]['y'] - xy_points[i]['y'])**2
print(xy_points[i+1]['y'], '-', xy_points[i]['y'], '**2')
d_partial = math.sqrt(x+y)
d.append(d_partial)
print('partial distances: ', d)
d_total = sum(d)
print('total_distance: ', d_total)
#print('x: ', x_point)
#print('y: ', y_point)
plt.plot(x_point, y_point)
plt.show()
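# Sketch of the pathlength() function requested in the docstring above
# (illustrative; the loop earlier in this script computes the same sum inline):
def pathlength(trail):
    """Return the total length of a trail given as a list of (x, y) tuples."""
    total = 0.0
    for (x1, y1), (x2, y2) in zip(trail[:-1], trail[1:]):
        total += math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
    return total
# Triangular test path from the docstring: 1 + sqrt(2) + 1 ~= 3.41
#print(pathlength([(1, 1), (2, 1), (1, 2), (1, 1)]))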
|
gpl-3.0
|
emdodds/DictLearner
|
tf_sparsenet.py
|
1
|
13831
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
try:
import matplotlib.pyplot as plt
except:
print("Can't import matplotlib.")
import tensorflow as tf
import sparsenet
class Sparsenet(sparsenet.Sparsenet):
"""
A sparse dictionary learner based on (Olshausen and Field, 1996).
Uses a tensorflow backend.
"""
def __init__(self,
data,
datatype="image",
pca=None,
nunits=200,
batch_size=100,
paramfile='dummy',
moving_avg_rate=0.01,
stimshape=None,
lam=0.15,
niter=200,
var_goal=0.04,
var_avg_rate=0.1,
gain_rate=0.01,
infrate=0.1,
learnrate=2.0,
store_every=1):
"""
Parameters
data : [nsamples, ndim] numpy array of training data
datatype : (str) "image" or "spectro"
pca : PCA object with inverse_transform(), or None
nunits : (int) number of units in sparsenet model
batch_size : (int) number of samples in each batch for learning
paramfile : (str) filename for pickle file storing parameters
moving_avg_rate: (float) rate for updating average statistics
stimshape : (array-like) original shape of each training datum
lam : (float) sparsity parameter; higher means more sparse
niter : (int) number of time steps in inference
var_goal : (float) target variance of activities
var_avg_rate: (float) rate for updating moving avg activity variance
gain_rate : (float) rate for updating gains to fix activity variance
infrate : (float) gradient descent rate for inference
learnrate : (float) gradient descent rate for learning
"""
# save input parameters
self.nunits = nunits
self.batch_size = batch_size
self.paramfile = paramfile
self.moving_avg_rate = moving_avg_rate
self.stimshape = stimshape or ((16, 16) if datatype == 'image'
else (25, 256))
self.lam = lam
self.niter = niter
self.var_goal = var_goal
self.var_avg_rate = var_avg_rate
self.gain_rate = gain_rate
self.infrate = infrate
self.learnrate = learnrate
self.store_every = store_every
# initialize model
self._load_stims(data, datatype, self.stimshape, pca)
self.Q = tf.random_normal([self.nunits, self.stims.datasize])
self.ma_variances = np.ones(self.nunits, dtype='float32')
self.gains = np.ones(self.nunits, dtype='float32')
self.graph = self.build_graph()
self.initialize_stats()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
self.config = tf.ConfigProto(gpu_options=gpu_options)
self.config.gpu_options.allow_growth = True
with tf.Session(graph=self.graph, config=self.config) as sess:
sess.run(self._init_op)
self.Q = sess.run(self.phi.assign(tf.nn.l2_normalize(self.phi,
dim=1)))
def initialize_stats(self):
self.loss_history = np.array([])
self.mse_history = np.array([])
self.L1_history = np.array([])
nunits = self.nunits
self.L0acts = np.zeros(nunits)
self.L1acts = np.zeros(nunits)
self.L2acts = np.zeros(nunits)
self.meanacts = np.zeros_like(self.L0acts)
def store_statistics(self, acts, loss_value, mse_value, meanL1_value):
eta = self.moving_avg_rate
self.loss_history = np.append(self.loss_history, loss_value)
self.mse_history = np.append(self.mse_history, mse_value)
self.L1_history = np.append(self.L1_history, meanL1_value/self.nunits)
self.L2acts = (1-eta)*self.L2acts + eta*(acts**2).mean(1)
self.L1acts = (1-eta)*self.L1acts + eta*np.abs(acts).mean(1)
L0means = np.mean(acts != 0, axis=1)
self.L0acts = (1-eta)*self.L0acts + eta*L0means
means = acts.mean(1)
self.meanacts = (1-eta)*self.meanacts + eta*means
def build_graph(self):
graph = tf.get_default_graph()
self._infrate = tf.Variable(self.infrate, trainable=False)
self._learnrate = tf.Variable(self.learnrate, trainable=False)
self.phi = tf.Variable(self.Q)
self.acts = tf.Variable(tf.zeros([self.nunits, self.batch_size]))
self.reset_acts = self.acts.assign(tf.zeros([self.nunits,
self.batch_size]))
self.x = tf.Variable(tf.zeros([self.batch_size, self.stims.datasize]),
trainable=False)
self.xhat = tf.matmul(tf.transpose(self.acts), self.phi)
self.resid = self.x - self.xhat
self.mse = tf.reduce_sum(tf.square(self.resid))
self.mse = self.mse/self.batch_size/self.stims.datasize
self.meanL1 = tf.reduce_sum(tf.abs(self.acts))/self.batch_size
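        # Sparse-coding objective (Olshausen & Field 1996): reconstruction
        # error plus an L1 penalty on the activities, weighted by self.lam.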
self.loss = 0.5*self.mse + self.lam*self.meanL1/self.stims.datasize
self.snr = tf.reduce_mean(tf.square(self.x))/self.mse
self.snr_db = 10.0*tf.log(self.snr)/np.log(10.0)
inffactor = self.batch_size*self.stims.datasize
inferer = tf.train.GradientDescentOptimizer(self._infrate*inffactor)
self.inf_op = inferer.minimize(self.loss, var_list=[self.acts])
learner = tf.train.GradientDescentOptimizer(self._learnrate)
self.learn_op = learner.minimize(self.loss, var_list=[self.phi])
self._ma_variances = tf.Variable(self.ma_variances, trainable=False)
self._gains = tf.Variable(self.gains, trainable=False)
_, self.variances = tf.nn.moments(self.acts, axes=[1])
vareta = self.var_avg_rate
newvar = (1.-vareta)*self._ma_variances + vareta*self.variances
self.update_variance = self._ma_variances.assign(newvar)
newgain = self.gains*tf.pow(self.var_goal/self._ma_variances,
self.gain_rate)
self.update_gains = self._gains.assign(newgain)
normphi = (tf.expand_dims(self._gains,
dim=1)*tf.nn.l2_normalize(self.phi, dim=1))
self.renorm_phi = self.phi.assign(normphi)
self._init_op = tf.global_variables_initializer()
return graph
def train_step(self, sess):
sess.run(self.x.assign(self.get_batch()))
sess.run(self.reset_acts)
for ii in range(self.niter):
sess.run([self.inf_op, self.loss])
oplist = [self.learn_op, self.loss, self.mse, self.meanL1]
_, loss_value, mse_value, meanL1_value = sess.run(oplist)
sess.run(self.update_variance)
sess.run(self.update_gains)
sess.run(self.renorm_phi)
return sess.run(self.acts), loss_value, mse_value, meanL1_value
def initialize_vars(self, sess):
"""Initializes values of tf Variables."""
sess.run(self._init_op)
sess.run([self.phi.assign(self.Q),
self._infrate.assign(self.infrate),
self._learnrate.assign(self.learnrate),
self._ma_variances.assign(self.ma_variances),
self._gains.assign(self.gains)])
def retrieve_vars(self, sess):
"""Retrieve values from tf graph."""
stuff = sess.run([self.phi,
self._infrate,
self._learnrate,
self._ma_variances,
self._gains])
(self.Q, self.infrate,
self.learnrate, self.ma_variances, self.gains) = stuff
def run(self, nbatches=1000):
with tf.Session(config=self.config, graph=self.graph) as sess:
self.initialize_vars(sess)
for tt in range(nbatches):
results = self.train_step(sess)
if tt % self.store_every == 0:
self.store_statistics(*results)
if self.store_every > 50 or tt % 50 == 0:
print(tt)
self.retrieve_vars(sess)
if (tt % 1000 == 0 or tt+1 == nbatches) and tt != 0:
try:
print("Saving progress to " + self.paramfile)
self.save()
except (ValueError, TypeError) as er:
print('Failed to save parameters. ', er)
self.retrieve_vars(sess)
def show_dict(self, cmap='RdBu', subset=None, layout='sqrt', savestr=None):
"""Plot an array of tiled dictionary elements.
The 0th element is in the top right."""
if subset is not None:
indices = np.random.choice(self.nunits, subset)
Qs = self.Q[np.sort(indices)]
else:
Qs = self.Q
array = self.stims.stimarray(Qs[::-1], layout=layout)
plt.figure()
arrayplot = plt.imshow(array, interpolation='nearest', cmap=cmap,
aspect='auto', origin='lower')
plt.axis('off')
plt.colorbar()
if savestr is not None:
plt.savefig(savestr, bbox_inches='tight')
return arrayplot
def show_element(self, index, cmap='jet', labels=None, savestr=None):
elem = self.stims.stim_for_display(self.Q[index])
plt.figure()
plt.imshow(elem.T, interpolation='nearest', cmap=cmap,
aspect='auto', origin='lower')
if labels is None:
plt.axis('off')
else:
plt.colorbar()
if savestr is not None:
plt.savefig(savestr, bbox_inches='tight')
def test_inference(self, x=None):
x = self.get_batch() if x is None else x
costs = np.zeros(self.niter)
with tf.Session(config=self.config, graph=self.graph) as sess:
self.initialize_vars(sess)
sess.run(self.x.assign(x))
sess.run(self.reset_acts)
for ii in range(self.niter):
_, costs[ii] = sess.run([self.inf_op, self.loss])
plt.plot(costs, 'b')
print("Final SNR: " + str(sess.run(self.snr_db)))
finalacts = sess.run(self.acts)
return (finalacts, costs)
def get_batch(self):
return self.stims.rand_stim(batch_size=self.batch_size).T
def progress_plot(self, window_size=1000, norm=1, start=0, end=-1):
"""Plots a moving average of the error and activity history with
the given averaging window."""
window = np.ones(int(window_size))/float(window_size)
smoothederror = np.convolve(self.mse_history[start:end], window,
'valid')
smoothedactivity = np.convolve(self.L1_history[start:end], window,
'valid')
plt.plot(smoothederror, 'b', smoothedactivity, 'g')
def adjust_rates(self, factor):
self.learnrate = factor*self.learnrate
def sort_dict(self, **kwargs):
raise NotImplementedError
def sort(self, usages, sorter, plot=False, savestr=None):
self.Q = self.Q[sorter]
self.L0acts = self.L0acts[sorter]
self.L1acts = self.L1acts[sorter]
self.L2acts = self.L2acts[sorter]
self.meanacts = self.meanacts[sorter]
if plot:
plt.figure()
plt.plot(usages[sorter])
plt.title('L0 Usage')
plt.xlabel('Dictionary index')
plt.ylabel('Fraction of stimuli')
if savestr is not None:
plt.savefig(savestr, format='png', bbox_inches='tight')
def get_param_list(self):
return {'nunits': self.nunits,
'batch_size': self.batch_size,
'paramfile': self.paramfile,
'lam': self.lam,
'niter': self.niter,
'var_goal': self.var_goal,
'var_avg_rate': self.var_avg_rate,
'gain_rate': self.gain_rate,
'infrate': self.infrate,
'learnrate': self.learnrate,
'gains': self.gains,
'ma_variances': self.ma_variances}
def set_params(self, params):
for key, val in params.items():
try:
getattr(self, key)
except AttributeError:
print('Unexpected parameter passed: ' + key)
setattr(self, key, val)
def get_histories(self):
return {'loss': self.loss_history,
'mse': self.mse_history,
'L1': self.L1_history,
'L0acts': self.L0acts,
'L1acts': self.L1acts,
'L2acts': self.L2acts,
'meanacts': self.meanacts}
def set_histories(self, histories):
self.loss_history = histories['loss']
self.mse_history = histories['mse']
self.L1_history = histories['L1']
self.L0acts = histories['L0acts']
self.L1acts = histories['L1acts']
self.L2acts = histories['L2acts']
self.meanacts = histories['meanacts']
@property
def errorhist(self):
return self.mse_history
@errorhist.setter
def errorhist(self, value):
self.mse_history = value
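# Illustrative construction (not part of the original module). The data array
# and its shape are assumptions; any [nsamples, 256] float array matches the
# default 16x16 'image' stimshape:
#
#     import numpy as np
#     data = np.random.randn(1000, 256).astype('float32')
#     net = Sparsenet(data, datatype='image', nunits=200, batch_size=100)
#     net.run(nbatches=100)
#     net.show_dict()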
|
mit
|
jesusfcr/airflow
|
airflow/contrib/plugins/metastore_browser/main.py
|
62
|
5773
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
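# Deployment note (an assumption, not stated in this file): placing this
# module in the Airflow plugins folder registers the "Hive Metadata Browser"
# view above under the webserver's "Plugins" menu.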
|
apache-2.0
|
pkruskal/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is chosen by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
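# 60 samples vs. 20 features: the empirical covariance is invertible but, as
# noted in the module docstring, the strongly correlated observations leave
# it ill-conditioned.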
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
|
henridwyer/scikit-learn
|
sklearn/datasets/__init__.py
|
74
|
3616
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
|
bsd-3-clause
|
pohno/hurricane
|
Processing/material.py
|
2
|
1435
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 12:06:48 2018
@author: geiger
"""
from optics_calcs.refrIndexData import RefrIndexData
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
class Material:
def __init__(self,name):
#get wavelength and refr from database
allData = RefrIndexData()
self.wavelengths = allData.data[name][0]
self.refr = allData.data[name][1]
#use interpolate function
self.f = scipy.interpolate.interp1d(self.wavelengths,self.refr)
#refractive index
def n(self,wl):
return float(self.f(wl))
#plot refractive index + interpolate function
def plotn(self):
plt.figure()
plt.plot(self.wavelengths,self.refr)
x = np.linspace(self.wavelengths[0],self.wavelengths[-1],num=10000)
y = self.f(x)
plt.plot(x,y,'--')
#group index, g = n(lamda) - lamda*(dn/dlamda)
def g(self,wl):
#use interpolated univariate spline to be able to calculate deriv
intfunc = scipy.interpolate.InterpolatedUnivariateSpline(self.wavelengths,self.refr)
#calculate derivative
intfuncderiv = intfunc.derivative()
g = intfunc(wl) - wl*intfuncderiv(wl)
return g
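# Illustrative use (not part of the original module); the material name is an
# assumption and must be a key in RefrIndexData().data, and the wavelength
# units are whatever that database stores:
#
#     m = Material('fused silica')
#     n_800 = m.n(800)   # refractive index
#     g_800 = m.g(800)   # group index, n - wl * dn/dwl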
|
mit
|
TomAugspurger/pandas
|
pandas/tests/indexes/multi/test_analytics.py
|
1
|
6771
|
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p17
import pandas as pd
from pandas import Index, MultiIndex, date_range, period_range
import pandas._testing as tm
def test_shift(idx):
# GH8083 test the base class for shift
msg = "Not supported for type MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_groupby(idx):
groups = idx.groupby(np.array([1, 1, 1, 2, 2, 2]))
labels = idx.tolist()
exp = {1: labels[:3], 2: labels[3:]}
tm.assert_dict_equal(groups, exp)
# GH5620
groups = idx.groupby(idx)
exp = {key: [key] for key in idx}
tm.assert_dict_equal(groups, exp)
def test_truncate():
major_axis = Index(list(range(4)))
minor_axis = Index(list(range(2)))
major_codes = np.array([0, 0, 1, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
result = index.truncate(before=1)
assert "foo" not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
msg = "after < before"
with pytest.raises(ValueError, match=msg):
index.truncate(3, 1)
# TODO: reshape
def test_reorder_levels(idx):
# this blows up
with pytest.raises(IndexError, match="^Too many levels"):
idx.reorder_levels([2, 1, 0])
def test_numpy_repeat():
reps = 2
numbers = [1, 2, 3]
names = np.array(["foo", "bar"])
m = MultiIndex.from_product([numbers, names], names=names)
expected = MultiIndex.from_product([numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(m, reps, axis=1)
def test_append_mixed_dtypes():
# GH 13660
dti = date_range("2011-01-01", freq="M", periods=3)
dti_tz = date_range("2011-01-01", freq="M", periods=3, tz="US/Eastern")
pi = period_range("2011-01", freq="M", periods=3)
mi = MultiIndex.from_arrays(
[[1, 2, 3], [1.1, np.nan, 3.3], ["a", "b", "c"], dti, dti_tz, pi]
)
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays(
[
[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
["a", "b", "c", "a", "b", "c"],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi),
]
)
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays(
[
["x", "y", "z"],
["x", "y", "z"],
["x", "y", "z"],
["x", "y", "z"],
["x", "y", "z"],
["x", "y", "z"],
]
)
res = mi.append(other)
exp = MultiIndex.from_arrays(
[
[1, 2, 3, "x", "y", "z"],
[1.1, np.nan, 3.3, "x", "y", "z"],
["a", "b", "c", "x", "y", "z"],
dti.append(pd.Index(["x", "y", "z"])),
dti_tz.append(pd.Index(["x", "y", "z"])),
pi.append(pd.Index(["x", "y", "z"])),
]
)
tm.assert_index_equal(res, exp)
def test_iter(idx):
result = list(idx)
expected = [
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
]
assert result == expected
def test_sub(idx):
first = idx
# - now raises (previously was set op difference)
msg = "cannot perform __sub__ with this index type: MultiIndex"
with pytest.raises(TypeError, match=msg):
first - idx[-3:]
with pytest.raises(TypeError, match=msg):
idx[-3:] - first
with pytest.raises(TypeError, match=msg):
idx[-3:] - first.tolist()
msg = "cannot perform __rsub__ with this index type: MultiIndex"
with pytest.raises(TypeError, match=msg):
first.tolist() - idx[-3:]
def test_map(idx):
# callable
index = idx
# we don't infer UInt64
if isinstance(index, pd.UInt64Index):
expected = index.astype("int64")
else:
expected = index
result = index.map(lambda x: x)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, idx: {i: e for e, i in zip(values, idx)},
lambda values, idx: pd.Series(values, idx),
],
)
def test_map_dictlike(idx, mapper):
if isinstance(idx, (pd.CategoricalIndex, pd.IntervalIndex)):
pytest.skip(f"skipping tests for {type(idx)}")
identity = mapper(idx.values, idx)
# we don't infer to UInt64 for a dict
if isinstance(idx, pd.UInt64Index) and isinstance(identity, dict):
expected = idx.astype("int64")
else:
expected = idx
result = idx.map(identity)
tm.assert_index_equal(result, expected)
# empty mappable
expected = pd.Index([np.nan] * len(idx))
result = idx.map(mapper(expected, idx))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"func",
[
np.exp,
np.exp2,
np.expm1,
np.log,
np.log2,
np.log10,
np.log1p,
np.sqrt,
np.sin,
np.cos,
np.tan,
np.arcsin,
np.arccos,
np.arctan,
np.sinh,
np.cosh,
np.tanh,
np.arcsinh,
np.arccosh,
np.arctanh,
np.deg2rad,
np.rad2deg,
],
ids=lambda func: func.__name__,
)
def test_numpy_ufuncs(idx, func):
# test ufuncs of numpy. see:
# https://numpy.org/doc/stable/reference/ufuncs.html
if _np_version_under1p17:
expected_exception = AttributeError
msg = f"'tuple' object has no attribute '{func.__name__}'"
else:
expected_exception = TypeError
msg = (
"loop of ufunc does not support argument 0 of type tuple which "
f"has no callable {func.__name__} method"
)
with pytest.raises(expected_exception, match=msg):
func(idx)
@pytest.mark.parametrize(
"func",
[np.isfinite, np.isinf, np.isnan, np.signbit],
ids=lambda func: func.__name__,
)
def test_numpy_type_funcs(idx, func):
msg = (
f"ufunc '{func.__name__}' not supported for the input types, and the inputs "
"could not be safely coerced to any supported types according to "
"the casting rule ''safe''"
)
with pytest.raises(TypeError, match=msg):
func(idx)
|
bsd-3-clause
|
tmhm/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
|
bsd-3-clause
|
yavalvas/yav_com
|
build/matplotlib/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py
|
18
|
26105
|
"""
Experimental support for curvilinear grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
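# The nested f1() below estimates tick angles by finite differences:
# (xx1a, yy1a) -> (xx1b, yy1b) gives the "normal" angle and
# (xx2a, yy2a) -> (xx2b, yy2b) the "tangent" angle; where the first
# difference degenerates to zero length, the tangent angle rotated by
# 90 degrees is used instead.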
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
objects which define the transform and its inverse. The callables
need to take two arguments (arrays of source coordinates) and
should return two arrays of target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
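# This toy transform shears the y coordinate by x (y -> y - x); the inverse
# class below undoes the shear (y -> y + x), which is enough to exercise the
# curvilinear grid helper without a real projection.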
def __init__(self, resolution):
"""
Create a new transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in the curved target space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot_class_factory, ParasiteAxesAuxTrans
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
# PolarAxes.PolarTransform takes radians. However, we want our coordinate
# system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
# Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
# And also use an appropriate formatter. Note that the
# acceptable Locator and Formatter classes are a bit different from
# mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but that may be possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let the right axis show ticklabels for the 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let the bottom axis show ticklabels for the 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
# PolarAxes.PolarTransform takes radians. However, we want our coordinate
# system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
# Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
# And also use an appropriate formatter. Note that the
# acceptable Locator and Formatter classes are a bit different from
# mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but that may be possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
# # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
|
mit
|
ssaeger/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
176
|
12155
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
|
bsd-3-clause
|
iLampard/alphaware
|
alphaware/tests/utils/test_input_validation.py
|
1
|
3236
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from parameterized import parameterized
import pandas as pd
import numpy as np
from numpy.testing.utils import assert_array_equal
from xutils.date_utils import Date
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
from alphaware.utils import (ensure_pd_series,
ensure_pyfin_date,
ensure_np_array,
ensure_pd_index_names)
from alphaware.enums import OutputDataFormat
from alphaware.const import (INDEX_FACTOR,
INDEX_INDUSTRY_WEIGHT)
class TestInputValidation(TestCase):
@parameterized.expand([([1, 2, 3], pd.Series([1, 2, 3])),
(np.array([1.0, 2.0, 3.0]), pd.Series([1.0, 2.0, 3.0])),
(pd.Series([1, 2, 3]), pd.Series([1, 2, 3]))
])
def test_ensure_pd_series(self, data, expected):
calculated = ensure_pd_series(None, None, data)
assert_series_equal(calculated, expected)
@parameterized.expand([('2014-01-02', '%Y-%m-%d', Date(2014, 1, 2)),
('2013/05/02', '%Y/%m/%d', Date(2013, 5, 2)),
(Date(2013, 5, 2), '%Y/%m/%d', Date(2013, 5, 2))])
def test_ensure_pyfin_date(self, data, date_format, expected):
calculated = ensure_pyfin_date(data, date_format)
self.assertEqual(calculated, expected)
@parameterized.expand([(None, None, pd.Series([1, 2, 3]), np.array([1, 2, 3])),
(None, None, pd.DataFrame([[1, 2, 3], [2, 3, 4]]), np.array([[1, 2, 3], [2, 3, 4]])),
(None, None, np.array([1, 2, 3]), np.array([1, 2, 3]))])
def test_ensure_np_array(self, func, argname, data, expected):
calculated = ensure_np_array(func, argname, data)
assert_array_equal(calculated, expected)
@parameterized.expand([(pd.DataFrame([1, 2],
index=pd.MultiIndex.from_product([['2010-01-01', '2010-01-02'], ['001']],
names=['trade_date', 'sec'])),
OutputDataFormat.MULTI_INDEX_DF,
INDEX_INDUSTRY_WEIGHT,
pd.DataFrame([1, 2],
index=pd.MultiIndex.from_product([['2010-01-01', '2010-01-02'], ['001']],
names=['trade_date', 'industry_code'])
)),
(pd.DataFrame([1, 2], index=pd.Index(['2010-01-01', '2010-01-02'], name='trade_date')),
OutputDataFormat.PITVOT_TABLE_DF,
INDEX_FACTOR,
pd.DataFrame([1, 2], index=pd.Index(['2010-01-01', '2010-01-02'], name='trade_date'))
)])
def test_ensure_pd_index_names(self, data, data_format, valid_index, expected):
calculated = ensure_pd_index_names(data, data_format, valid_index)
assert_frame_equal(calculated, expected)
|
apache-2.0
|
robertwb/incubator-beam
|
sdks/python/apache_beam/runners/interactive/display/pcoll_visualization_test.py
|
5
|
8679
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.runners.interactive.display.pcoll_visualization."""
# pytype: skip-file
import unittest
from unittest.mock import ANY
from unittest.mock import PropertyMock
from unittest.mock import patch
import pytz
import apache_beam as beam
from apache_beam.runners import runner
from apache_beam.runners.interactive import interactive_beam as ib
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import interactive_runner as ir
from apache_beam.runners.interactive.display import pcoll_visualization as pv
from apache_beam.runners.interactive.recording_manager import RecordingManager
from apache_beam.runners.interactive.testing.mock_ipython import mock_get_ipython
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils.windowed_value import PaneInfo
from apache_beam.utils.windowed_value import PaneInfoTiming
try:
import timeloop
except ImportError:
pass
@unittest.skipIf(
not ie.current_env().is_interactive_ready,
'[interactive] dependency is not installed.')
class PCollectionVisualizationTest(unittest.TestCase):
def setUp(self):
ie.new_env()
# Allow unit test to run outside of ipython kernel since we don't test the
# frontend rendering in unit tests.
pv._pcoll_visualization_ready = True
# Generally test the logic where notebook is connected to the assumed
# ipython kernel by forcefully setting notebook check to True.
ie.current_env()._is_in_notebook = True
ib.options.display_timezone = pytz.timezone('US/Pacific')
self._p = beam.Pipeline(ir.InteractiveRunner())
# pylint: disable=range-builtin-not-iterating
self._pcoll = self._p | 'Create' >> beam.Create(range(5))
ib.watch(self)
ie.current_env().track_user_pipelines()
recording_manager = RecordingManager(self._p)
recording = recording_manager.record([self._pcoll], 5, 5)
self._stream = recording.stream(self._pcoll)
def test_pcoll_visualization_generate_unique_display_id(self):
pv_1 = pv.PCollectionVisualization(self._stream)
pv_2 = pv.PCollectionVisualization(self._stream)
self.assertNotEqual(pv_1._dive_display_id, pv_2._dive_display_id)
self.assertNotEqual(pv_1._overview_display_id, pv_2._overview_display_id)
self.assertNotEqual(pv_1._df_display_id, pv_2._df_display_id)
@patch('IPython.get_ipython', new_callable=mock_get_ipython)
@patch(
'apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.is_in_notebook',
new_callable=PropertyMock)
def test_one_shot_visualization_not_return_handle(
self, mocked_is_in_notebook, unused):
mocked_is_in_notebook.return_value = True
self.assertIsNone(pv.visualize(self._stream, display_facets=True))
@patch('IPython.get_ipython', new_callable=mock_get_ipython)
@patch(
'apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.is_in_notebook',
new_callable=PropertyMock)
def test_dynamic_plotting_return_handle(self, mocked_is_in_notebook, unused):
mocked_is_in_notebook.return_value = True
h = pv.visualize(
self._stream, dynamic_plotting_interval=1, display_facets=True)
self.assertIsInstance(h, timeloop.Timeloop)
h.stop()
@patch('IPython.get_ipython', new_callable=mock_get_ipython)
@patch(
'apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.is_in_notebook',
new_callable=PropertyMock)
def test_no_dynamic_plotting_when_not_in_notebook(
self, mocked_is_in_notebook, unused):
mocked_is_in_notebook.return_value = False
h = pv.visualize(
self._stream, dynamic_plotting_interval=1, display_facets=True)
self.assertIsNone(h)
@patch(
'apache_beam.runners.interactive.display.pcoll_visualization'
'.PCollectionVisualization._display_dive')
@patch(
'apache_beam.runners.interactive.display.pcoll_visualization'
'.PCollectionVisualization._display_overview')
@patch(
'apache_beam.runners.interactive.display.pcoll_visualization'
'.PCollectionVisualization._display_dataframe')
def test_dynamic_plotting_updates_same_display(
self,
mocked_display_dataframe,
mocked_display_overview,
mocked_display_dive):
original_pcollection_visualization = pv.PCollectionVisualization(
self._stream, display_facets=True)
# Dynamic plotting always creates a new PCollectionVisualization.
new_pcollection_visualization = pv.PCollectionVisualization(
self._stream, display_facets=True)
# The display uses ANY data the moment display is invoked, and updates
# web elements with ids fetched from the given updating_pv.
new_pcollection_visualization.display(
updating_pv=original_pcollection_visualization)
mocked_display_dataframe.assert_called_once_with(
ANY, original_pcollection_visualization)
# Below assertions are still true without newer calls.
mocked_display_overview.assert_called_once_with(
ANY, original_pcollection_visualization)
mocked_display_dive.assert_called_once_with(
ANY, original_pcollection_visualization)
def test_auto_stop_dynamic_plotting_when_job_is_terminated(self):
fake_pipeline_result = runner.PipelineResult(runner.PipelineState.RUNNING)
ie.current_env().set_pipeline_result(self._p, fake_pipeline_result)
# When job is running, the dynamic plotting will not be stopped.
self.assertFalse(ie.current_env().is_terminated(self._p))
fake_pipeline_result = runner.PipelineResult(runner.PipelineState.DONE)
ie.current_env().set_pipeline_result(self._p, fake_pipeline_result)
# When job is done, the dynamic plotting will be stopped.
self.assertTrue(ie.current_env().is_terminated(self._p))
@patch('pandas.DataFrame.head')
def test_display_plain_text_when_kernel_has_no_frontend(self, _mocked_head):
# Resets the notebook check to False.
ie.current_env()._is_in_notebook = False
self.assertIsNone(pv.visualize(self._stream, display_facets=True))
_mocked_head.assert_called_once()
def test_event_time_formatter(self):
# In microseconds: Monday, March 2, 2020 3:14:54 PM GMT-08:00
event_time_us = 1583190894000000
self.assertEqual(
'2020-03-02 15:14:54.000000-0800',
pv.event_time_formatter(event_time_us))
def test_event_time_formatter_overflow_lower_bound(self):
# A relatively small negative event time, which could be valid in Beam but
# has no meaning when visualized.
event_time_us = -100000000000000000
self.assertEqual('Min Timestamp', pv.event_time_formatter(event_time_us))
def test_event_time_formatter_overflow_upper_bound(self):
# A relatively large event time, which exceeds the upper bound of unix time
# (year 2038). It could mean infinite future in Beam but has no meaning
# when visualized.
# The value in test is supposed to be year 10000.
event_time_us = 253402300800000000
self.assertEqual('Max Timestamp', pv.event_time_formatter(event_time_us))
def test_windows_formatter_global(self):
gw = GlobalWindow()
self.assertEqual(str(gw), pv.windows_formatter([gw]))
def test_windows_formatter_interval(self):
# The unit is second.
iw = IntervalWindow(start=1583190894, end=1583200000)
self.assertEqual(
'2020-03-02 15:14:54.000000-0800 (2h 31m 46s)',
pv.windows_formatter([iw]))
def test_pane_info_formatter(self):
self.assertEqual(
'Pane 0: Final Early',
pv.pane_info_formatter(
PaneInfo(
is_first=False,
is_last=True,
timing=PaneInfoTiming.EARLY,
index=0,
nonspeculative_index=0)))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
wdm0006/categorical_encoding
|
category_encoders/target_encoder.py
|
1
|
10127
|
"""Target Encoder"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from category_encoders.ordinal import OrdinalEncoder
import category_encoders.utils as util
__author__ = 'chappers'
class TargetEncoder(BaseEstimator, TransformerMixin):
"""Target encoding for categorical features.
For the case of a categorical target: features are replaced with a blend of the posterior probability of the target
given a particular categorical value and the prior probability of the target over all the training data.
For the case of a continuous target: features are replaced with a blend of the expected value of the target
given a particular categorical value and the expected value of the target over all the training data.
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
handle_unknown: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
min_samples_leaf: int
minimum samples to take category average into account.
smoothing: float
smoothing effect to balance categorical average vs prior. Higher value means stronger regularization.
The value must be strictly bigger than 0.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = TargetEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from
https://dl.acm.org/citation.cfm?id=507538
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value',
handle_unknown='value', min_samples_leaf=1, smoothing=1.0):
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.cols = cols
self.ordinal_encoder = None
self.min_samples_leaf = min_samples_leaf
self.smoothing = float(smoothing) # Make smoothing a float so that python 2 does not treat as integer division
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._mean = None
self.feature_names = None
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# unite the input into pandas types
X = util.convert_input(X)
y = util.convert_input_vector(y, X.index)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.cols is None:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
self.ordinal_encoder = OrdinalEncoder(
verbose=self.verbose,
cols=self.cols,
handle_unknown='value',
handle_missing='value'
)
self.ordinal_encoder = self.ordinal_encoder.fit(X)
X_ordinal = self.ordinal_encoder.transform(X)
self.mapping = self.fit_target_encoding(X_ordinal, y)
X_temp = self.transform(X, override_return_df=True)
self.feature_names = list(X_temp.columns)
if self.drop_invariant:
self.drop_cols = []
X_temp = self.transform(X)
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def fit_target_encoding(self, X, y):
mapping = {}
for switch in self.ordinal_encoder.category_mapping:
col = switch.get('col')
values = switch.get('mapping')
prior = self._mean = y.mean()
stats = y.groupby(X[col]).agg(['count', 'mean'])
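# The blend weight computed next is a sigmoid of the category count:
# categories observed well above min_samples_leaf lean towards their own
# mean, while rare categories shrink back towards the global prior. Roughly,
# weight = 1 / (1 + exp(-(count - min_samples_leaf) / smoothing)) and
# encoding = prior * (1 - weight) + category_mean * weight.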
smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing))
smoothing = prior * (1 - smoove) + stats['mean'] * smoove
smoothing[stats['count'] == 1] = prior
if self.handle_unknown == 'return_nan':
smoothing.loc[-1] = np.nan
elif self.handle_unknown == 'value':
smoothing.loc[-1] = prior
if self.handle_missing == 'return_nan':
smoothing.loc[values.loc[np.nan]] = np.nan
elif self.handle_missing == 'value':
smoothing.loc[-2] = prior
mapping[col] = smoothing
return mapping
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target info (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# unite the input into pandas types
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
# if we are encoding the training data, we have to check the target
if y is not None:
y = util.convert_input_vector(y, X.index)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
if not list(self.cols):
return X
X = self.ordinal_encoder.transform(X)
if self.handle_unknown == 'error':
if X[self.cols].isin([-1]).any().any():
raise ValueError('Unexpected categories found in dataframe')
X = self.target_encode(X)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def fit_transform(self, X, y=None, **fit_params):
"""
Encoders that utilize the target must make sure that the training data are transformed with:
transform(X, y)
and not with:
transform(X)
"""
# the interface requires 'y=None' in the signature but we need 'y'
if y is None:
raise TypeError("fit_transform() missing argument: 'y'")
return self.fit(X, y, **fit_params).transform(X, y)
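# A minimal sketch of the intended call pattern (variable names are
# illustrative only):
#
#   enc = TargetEncoder(cols=['CHAS', 'RAD'])
#   X_train_enc = enc.fit_transform(X_train, y_train)  # y is required here
#   X_test_enc = enc.transform(X_test)                 # no target at test time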
def target_encode(self, X_in):
X = X_in.copy(deep=True)
for col in self.cols:
X[col] = X[col].map(self.mapping[col])
return X
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError('Must fit data first. Affected feature names are not known before.')
else:
return self.feature_names
|
bsd-3-clause
|
boland1992/seissuite_iran
|
build/lib/ambient/spectrum/find_maxima.py
|
8
|
13563
|
# -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from info_dataless import locs_from_dataless
from descartes.patch import PolygonPatch
from matplotlib.colors import LogNorm
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
"""
Class used to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. This class uses
the shapely module.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def shape_poly(self):
with fiona.open(self.boundary) as fiona_collection:
# In this case, we'll assume the shapefile only has one layer
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
self.polygon = geometry.asShape( shapefile_record['geometry'] )
return self.polygon
def point_check(self, coord):
"""
Function that takes a single (2,1) shape input, converts the points
into a shapely.geometry.Point object and then checks if the coord
is contained within the shapefile.
"""
self.polygon = self.shape_poly()
point = geometry.Point(coord[0], coord[1])
if self.polygon.contains(point):
return coord
def shape_bounds(self):
"""
Function that returns the bounding box coordinates xmin,xmax,ymin,ymax
"""
self.polygon = self.shape_poly()
return self.polygon.bounds
def shape_buffer(self, shape=None, size=1., res=1):
"""
Function that returns a new polygon of the larger buffered points.
Can import polygon into function if desired. Default is
self.shape_poly()
"""
if shape is None:
self.polygon = self.shape_poly()
return asPolygon(self.polygon.buffer(size, resolution=res)\
.exterior)
def extract_poly_coords(self, poly):
if poly.type == 'Polygon':
exterior_coords = poly.exterior.coords[:]
elif poly.type == 'MultiPolygon':
exterior_coords = []
for part in poly:
epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
exterior_coords.append(epc)
else:
raise ValueError('Unhandled geometry type: ' + repr(poly.type))
return np.vstack(exterior_coords)
def external_coords(self, shape=None, buff=None, size=1., res=1):
"""
Function that returns the external coords of a buffered shapely
polygon. Note that shape variable input
MUST be a shapely Polygon object.
"""
if shape is not None and buff is not None:
poly = self.shape_buffer(shape=shape, size=size, res=res)
elif shape is not None:
poly = shape
else:
poly = self.shape_poly()
exterior_coords = self.extract_poly_coords(poly)
return exterior_coords
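# A minimal usage sketch of InShape (the shapefile path below is hypothetical
# and only for illustration):
#
#   region = InShape('aus.shp')
#   poly = region.shape_poly()
#   coord = region.point_check((144.96, -37.81))  # returns the coord or None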
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', nperseg=1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
except Exception as e:
a=4
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
# import background shapefile location
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
#fig1 = plt.figure(figsize=(15,10))
#ax1 = fig1.add_subplot(111)
#ax1.set_title("Seismic Waveform Power Density Spectrum\n{}".format('S | 2014'))
#ax1.set_xlabel('Frequency (Hz)')
#ax1.set_ylabel('Power Density Spectrum (V RMS)')
#ax1.set_xlim([0,4])
#ax1.grid(True, axis='both', color='gray')
#ax1.set_autoscaley_on(True)
#ax1.set_yscale('log')
# initialise dictionary to hold all maxima information for a given station
# this will be used to return a new dictionary of the average maxima
# for each station over the course of a year.
maxima_dict0 = {}
maxima_dict1 = {}
a=5
for s in paths_list[:2]:
try:
split_path = s.split('/')
stat_info = split_path[-1][:-6]
net = stat_info.split('.')[0]
stat = stat_info.split('.')[1]
net_stat = '{}.{}'.format(net,stat)
year = split_path[-2].split('-')[0]
t0 = datetime.datetime.now()
st = read(s)
t1 = datetime.datetime.now()
if a == 5: # net == 'S':
print "time taken to import one month mseed was: ", t1-t0
# set up loop for all traces within each imported stream.
t0 = datetime.datetime.now()
pool = mp.Pool()
spectra = pool.map(spectrum, st[:])
pool.close()
pool.join()
t1 = datetime.datetime.now()
print "time taken to calculate monthly spectra: ", t1-t0
# Calculate weighted average spectrum for this station for this month
spectra = np.asarray(spectra)
search = np.where(spectra==0.)
spectra = np.delete(spectra, search)
spectra = np.average(spectra, axis=0)
X, Y = spectra[:,0], spectra[:,1]
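# argrelextrema with np.greater returns the indices of strict local maxima of
# the averaged spectrum; they are sorted by amplitude below so that the two
# strongest spectral peaks can be kept for the noise maps.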
extrema_indices = argrelextrema(Y, np.greater)[0]
maxima_X = X[extrema_indices]
maxima_Y = Y[extrema_indices]
local_extrema = np.column_stack((maxima_X, maxima_Y))
# sort local maxima
local_extrema = local_extrema[local_extrema[:, 1].argsort()]
local_extrema = local_extrema[::-1]
# retrieve the top two maxima from the PDS plot for use on
# noise map.
max0, max1 = local_extrema[0], local_extrema[1]
maxes = [max0,max1]
if not net_stat in maxima_dict0.keys():
maxima_dict0[net_stat] = []
if net_stat in maxima_dict0.keys():
#if not len(maxima_dict[stat]) >= 1:
maxima_dict0[net_stat].append(max0)
if net_stat not in maxima_dict1.keys():
maxima_dict1[net_stat] = []
if net_stat in maxima_dict1.keys():
maxima_dict1[net_stat].append(max1)
#smooth_Y = np.convolve(X,Y)
#smooth_X = np.linspace(np.min(X), np.max(X),len(smooth_Y))
#plt.plot(smooth_X, smooth_Y, c='b', alpha=0.8)
#plt.plot(X, Y, c='k', alpha=0.5)
#plt.scatter(maxima_X, maxima_Y, c='r', s=30)
#plt.show()
#plt.clf()
except:
a=5
#plt.figure()
#stack and find average values for all of the above for each station
#for key in maxima_dict0.keys():
# stat_locs[key]
# maxima_dict0[key] = np.asarray(maxima_dict0[key])
# plt.scatter(maxima_dict0[key][:,0],maxima_dict0[key][:,1], c='b', s=10)
# maxima_dict0[key] = np.average(maxima_dict0[key], axis=0)
# plt.scatter(maxima_dict0[key][0],maxima_dict0[key][1], c='r', s=30)
# print maxima_dict0[key]
#for key in maxima_dict1.keys():
# maxima_dict1[key] = np.asarray(maxima_dict1[key])
# plt.scatter(maxima_dict1[key][:,0],maxima_dict1[key][:,1], c='b', s=10)
# maxima_dict1[key] = np.average(maxima_dict1[key], axis=0)
# plt.scatter(maxima_dict1[key][0],maxima_dict1[key][1], c='r', s=30)
#plt.show()
noise_info0 = []
#stack and find average values for all of the above for each station
for key in maxima_dict0.keys():
maxima_dict0[key] = np.asarray(maxima_dict0[key])
maxima_dict0[key] = np.average(maxima_dict0[key], axis=0)
noise_info0.append([stat_locs[key][0],
stat_locs[key][1],
maxima_dict0[key][1]])
noise_info0 = np.asarray(noise_info0)
# dump noise_info0
with open('noise_info0.pickle', 'wb') as f:
pickle.dump(noise_info0, f, protocol=2)
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n S Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
cm = plt.cm.get_cmap('RdYlBu')
cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap=cm)
col = plt.colorbar(sc)
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
fig.savefig('station_pds_maxima/Peak1 PDS Average Maxima 2014.svg', format='SVG')
quit()
noise_info1 = []
#stack and find average values for all of the above for each station
for key in maxima_dict1.keys():
maxima_dict1[key] = np.asarray(maxima_dict1[key])
maxima_dict1[key] = np.average(maxima_dict1[key], axis=0)
noise_info1.append([stat_locs[key][0],
stat_locs[key][1],
maxima_dict1[key][1]])
noise_info1 = np.asarray(noise_info1)
# dump noise_info1
with open('noise_info1.pickle', 'wb') as f:
pickle.dump(noise_info1, f, protocol=2)
fig1 = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise Second Peak Maximum PDS\n S Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
cm = plt.cm.get_cmap('RdYlBu')
cmin, cmax = np.min(noise_info1[:,2]), np.max(noise_info1[:,2])
sc = plt.scatter(noise_info1[:,0], noise_info1[:,1], c=noise_info1[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap=cm)
col = plt.colorbar(sc)
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
if shape_path is not None and UNIQUE_SHAPE is not None:
patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
fig1.savefig('station_pds_maxima/Peak2 PDS Average Maxima 2014.svg', format='SVG')
with open('noise_info1.pickle', 'wb') as f:
pickle.dump(noise_info1, f, protocol=2)
|
gpl-3.0
|
mfjb/scikit-learn
|
examples/manifold/plot_manifold_sphere.py
|
258
|
5101
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
cwu2011/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
259
|
3265
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
|
bsd-3-clause
|
plissonf/scikit-learn
|
sklearn/neighbors/nearest_centroid.py
|
199
|
7249
|
# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroid for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class is minimized.
If the "manhattan" metric is provided, this centroid is the median;
for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in CSC
# format, since that makes computing the median easier.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
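# This 'deviation' is the per-feature shrunken-centroids statistic d_kj
# (cf. Tibshirani et al., 2002): class-vs-overall centroid difference scaled
# by m_k * (s_j + s0), where s0 = median(s) guards against tiny variances.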
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
|
bsd-3-clause
|
cjfuller/mobile_video_zoom
|
mvz/methods/bandpass_and_snapping.py
|
2
|
7180
|
"""Bandpass filter the positions, then snap to as few shots as possible."""
from typing import Any, List, Optional, Tuple
import funcy as fn
import numpy as np
import pandas as pd
import scipy.signal as sig
import scipy.stats as stats
from mvz import const
from mvz.methods import shared
from mvz.methods.shared import FrameSpecOutput
anticipation_time = 30
freq_cutoff = 0.05
padding = 80
initial_offset = 0
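# Tunables (meanings inferred from their use below):
#   anticipation_time - frames over which a box move is eased in ahead of a change
#   freq_cutoff       - normalized Butterworth cutoff (fraction of Nyquist)
#   padding           - pixels of slack kept inside the crop box around the action
#   initial_offset    - frames dropped from the start of the filtered trace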
def bandpass_filter_data(data: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
"""Lowpass filter the supplied x, y data."""
b, a = sig.butter(6, freq_cutoff)
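# sig.butter defaults to a low-pass design; freq_cutoff is in normalized
# frequency units where 1.0 is the Nyquist frequency.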
x_filt = sig.lfilter(b, a, data['x'])
y_filt = sig.lfilter(b, a, data['y'])
return (x_filt, y_filt)
def choose_window(seq: np.ndarray, window_size: int) -> Tuple[float, int]:
"""Find the maximum length time window until action exits the box.
Returns a tuple of the minimum value in the box and the number of frames to
stay in that box.
"""
prev_min = 0.0
minval = float('Infinity')
maxval = 0.0
index = -1
while (maxval - minval) < window_size:
index += 1
prev_min = minval
if index == len(seq):
break
minval = min(minval, seq[index])
maxval = max(maxval, seq[index])
return (prev_min, index)
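# Illustrative (hypothetical) call, assuming a 1-D pixel trace and a
# 100-pixel window:
#     choose_window(np.array([10., 12., 15., 140., 150.]), 100) -> (10.0, 3)
# i.e. the action stays inside one 100-pixel window for the first 3 frames,
# and 10.0 is that window's lower edge.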
FrameSpec = List[Tuple[float, int]]
def make_frame_specs(x_filt: np.ndarray, y_filt: np.ndarray,
video_width: int, video_height: int) -> (
Tuple[FrameSpec, FrameSpec]):
def make_spec(seq: np.ndarray, box_size: int, max_size: int) -> FrameSpec:
frames = []
while len(seq) > 0:
start, idx = choose_window(seq, box_size - padding)
if start < 0:
start = 0
if start > max_size - box_size:
start = max_size - box_size
frames.append((start, idx))
seq = seq[idx:]
return frames
x_frames = make_spec(x_filt[initial_offset:],
const.box_width,
video_width)
y_frames = make_spec(y_filt[initial_offset:],
const.box_height,
video_height)
return x_frames, y_frames
def make_boxes_from_frame_spec(min_frame: int, max_frame: int,
xspec: FrameSpec, yspec: FrameSpec,
video_width: int, video_height: int,
keyframes_only: bool = False) -> (
FrameSpecOutput):
key_frames_x = [0] + list(fn.sums([frame for _, frame in xspec]))[:-1]
key_frames_y = [0] + list(fn.sums([frame for _, frame in yspec]))[:-1]
all_keyframes = list(sorted(list(
set(key_frames_x).union(set(key_frames_y)))))
@fn.autocurry
def key_frame_index(key_frames, frame):
for ki, k in enumerate(key_frames):
if k > frame:
return ki - 1
return len(key_frames) - 1
@fn.autocurry
def ensure_in_range(size, maxval, pos):
if pos - padding/2 < 0:
pos = padding/2
elif pos - padding/2 + size > maxval:
pos = maxval - size + padding/2
return pos
frame_pos_x_fn = fn.rcompose(
fn.partial(fn.map,
key_frame_index(key_frames_x)),
fn.partial(fn.map, lambda key_idx: int(round(xspec[key_idx][0]))),
fn.partial(fn.map,
ensure_in_range(const.box_width, video_width)))
frame_pos_y_fn = fn.rcompose(
fn.partial(fn.map,
key_frame_index(key_frames_y)),
fn.partial(fn.map, lambda key_idx: int(round(yspec[key_idx][0]))),
fn.partial(fn.map,
ensure_in_range(const.box_height, video_height)))
if keyframes_only:
frame_pos_x = frame_pos_x_fn(all_keyframes)
frame_pos_y = frame_pos_y_fn(all_keyframes)
return [
(float(frame) / len(range(min_frame, max_frame)),) +
shared.tuple4(tuple(
int(round(coord))
for coord in (pos_x - padding/2,
pos_y - padding/2,
pos_x - padding/2 + const.box_width,
pos_y - padding/2 + const.box_height)
)) for frame, pos_x, pos_y in zip(all_keyframes, frame_pos_x,
frame_pos_y)]
else:
frame_pos_x = frame_pos_x_fn(range(min_frame, max_frame))
frame_pos_y = frame_pos_y_fn(range(min_frame, max_frame))
return [shared.tuple4(tuple(
int(round(coord))
for coord in (pos_x - padding/2,
pos_y - padding/2,
pos_x - padding/2 + const.box_width,
pos_y - padding/2 + const.box_height)
)) for pos_x, pos_y in zip(frame_pos_x, frame_pos_y)]
def distance_to_next_change(boxes: List[Any], idx: int) -> Optional[int]:
"""Given a sequence and an index, find the number of elements to the next
value that's different from the current one.
"""
remboxes = boxes[idx:] + [None]
dist = fn.ilen(fn.takewhile(lambda b: b == boxes[idx], remboxes))
if remboxes[dist] is None:
return None
return dist
def _interpolate1(start_coord: int, finish_coord: int, distance: int) -> int:
mean = float(anticipation_time) / 2
scale = anticipation_time / 6
frac = stats.norm.cdf(anticipation_time - distance, loc=mean, scale=scale)
return int(round(start_coord + (finish_coord - start_coord) * frac))
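# With anticipation_time = 30 this evaluates norm.cdf(30 - distance, loc=15,
# scale=5): a smooth S-curve, so the crop eases from start_coord towards
# finish_coord as the upcoming box change approaches, rather than jumping.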
def interpolate(start_box: const.BoundingBox, finish_box: const.BoundingBox,
distance: int) -> const.BoundingBox:
return shared.tuple4(tuple(
_interpolate1(start_coord, final_coord, distance)
for start_coord, final_coord in zip(start_box, finish_box)))
def anticipate_changes(
boxes: List[const.BoundingBox]) -> List[const.BoundingBox]:
should_anticipate = [
idx < len(boxes) - anticipation_time and
boxes[idx] != boxes[idx + anticipation_time]
for idx in range(len(boxes))]
new_boxes = []
for i, box in enumerate(boxes):
if should_anticipate[i]:
dist = distance_to_next_change(boxes, i)
result = interpolate(
box, boxes[i + dist], dist)
else:
result = box
new_boxes.append(result)
return new_boxes
def main(youtube_id: str, frame_count: int,
video_width: int, video_height: int,
keyframes_only: bool = False) -> FrameSpecOutput:
data = shared.read_path_data(const.path_data_fn(youtube_id))
x_filt, y_filt = bandpass_filter_data(data)
x_frames, y_frames = make_frame_specs(x_filt, y_filt, video_width, video_height)
boxes = make_boxes_from_frame_spec(
-initial_offset, frame_count - initial_offset,
x_frames, y_frames,
video_width, video_height,
keyframes_only=keyframes_only)
if not keyframes_only:
boxes_with_smoothing = anticipate_changes(boxes)
shared.crop_to_bounding_boxes(youtube_id, frame_count, boxes_with_smoothing)
return boxes
|
mit
|
yavalvas/yav_com
|
build/matplotlib/examples/pylab_examples/trigradient_demo.py
|
7
|
3075
|
"""
Demonstrates computation of gradient with matplotlib.tri.CubicTriInterpolator.
"""
from matplotlib.tri import Triangulation, UniformTriRefiner,\
CubicTriInterpolator
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import math
#-----------------------------------------------------------------------------
# Electrical potential of a dipole
#-----------------------------------------------------------------------------
def dipole_potential(x, y):
""" The electric dipole potential V """
r_sq = x**2 + y**2
theta = np.arctan2(y, x)
z = np.cos(theta)/r_sq
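# ideal dipole oriented along x: V ~ cos(theta) / r**2; the return value
# below rescales it to the [0, 1] range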
return (np.max(z)-z) / (np.max(z)-np.min(z))
#-----------------------------------------------------------------------------
# Creating a Triangulation
#-----------------------------------------------------------------------------
# First create the x and y coordinates of the points.
n_angles = 30
n_radii = 10
min_radius = 0.2
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += math.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
V = dipole_potential(x, y)
# Create the Triangulation; no triangles specified so Delaunay triangulation
# created.
triang = Triangulation(x, y)
# Mask off unwanted triangles.
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang.set_mask(mask)
#-----------------------------------------------------------------------------
# Refine data - interpolates the electrical potential V
#-----------------------------------------------------------------------------
refiner = UniformTriRefiner(triang)
tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)
#-----------------------------------------------------------------------------
# Computes the electrical field (Ex, Ey) as gradient of electrical potential
#-----------------------------------------------------------------------------
tci = CubicTriInterpolator(triang, -V)
# Gradient requested here at the mesh nodes but could be anywhere else:
(Ex, Ey) = tci.gradient(triang.x, triang.y)
E_norm = np.sqrt(Ex**2 + Ey**2)
#-----------------------------------------------------------------------------
# Plot the triangulation, the potential iso-contours and the vector field
#-----------------------------------------------------------------------------
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(triang, color='0.8')
levels = np.arange(0., 1., 0.01)
cmap = cm.get_cmap(name='hot', lut=None)
plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
linewidths=[2.0, 1.0, 1.0, 1.0])
# Plots direction of the electrical vector field
plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
units='xy', scale=10., zorder=3, color='blue',
width=0.007, headwidth=3., headlength=4.)
plt.title('Gradient plot: an electrical dipole')
plt.show()
|
mit
|
chrisb13/mkmov
|
commands/_threedsurf.py
|
6
|
3107
|
## Author: Christopher Bull.
## Affiliation: Climate Change Research Centre and ARC Centre of Excellence for Climate System Science.
## Level 4, Mathews Building
## University of New South Wales
## Sydney, NSW, Australia, 2052
## Contact: [email protected]
## www: christopherbull.com.au
## Date created: Thu Jun 5 10:11:55 EST 2014
## Machine created on: squall.ccrc.unsw.edu.au
##
## The virtualenv packages available on creation date (includes systemwide):
## Cartopy==0.11.x
## Cython==0.19.1
## Fiona==1.1.2
## GDAL==1.10.1
## Jinja==1.2
## Jinja2==2.7.2
## MDP==3.3
## MarkupSafe==0.18
## PyNGL==1.4.0
## Pygments==1.6
## ScientificPython==2.8
## Shapely==1.3.0
## Sphinx==1.2.1
## backports.ssl-match-hostname==3.4.0.2
## basemap==1.0.7
## brewer2mpl==1.4
## descartes==1.0.1
## distribute==0.7.3
## docutils==0.11
## geopandas==0.1.0.dev-1edddad
## h5py==2.2.0
## ipython==1.2.0
## joblib==0.7.1
## matplotlib==1.3.1
## netCDF4==1.0.4
## nose==1.3.3
## numexpr==2.2.2
## numpy==1.8.1
## pandas==0.13.1
## patsy==0.2.1
## pexpect==2.4
## prettyplotlib==0.1.7
## progressbar==2.3
## py==1.4.20
## pycairo==1.8.6
## pygrib==1.9.7
## pyhdf==0.8.3
## pyparsing==2.0.2
## pyproj==1.9.3
## pyshp==1.2.1
## pytest==2.5.2
## python-dateutil==2.2
## pytz==2014.1
## pyzmq==14.0.1
## scikit-learn==0.13.1
## scipy==0.12.0
## seaborn==0.3.1
## six==1.6.1
## statsmodels==0.5.0
## tables==3.0.0
## tornado==3.2.1
## virtualenv==1.10.1
## wsgiref==0.1.2
## xmltodict==0.8.6
##
## The modules available on creation date:
## # Currently Loaded Modulefiles:
# 1) hdf5/1.8.11-intel 5) matlab/2011b 9) perl/5.18.2
# 2) ncview/2.1.2 6) python/2.7.5 10) gdal/1.10.1
# 3) netcdf/3.6.3-intel 7) proj/4.8.0
# 4) intel/13.1.3.192 8) geos/3.3.3
## # Currently Loaded Modulefiles:
# 1) hdf5/1.8.11-intel 5) matlab/2011b 9) perl/5.18.2
# 2) ncview/2.1.2 6) python/2.7.5 10) gdal/1.10.1
# 3) netcdf/3.6.3-intel 7) proj/4.8.0
# 4) intel/13.1.3.192 8) geos/3.3.3
#
#python logging
import logging as _logging
from functools import wraps as _wraps
class _LogStart(object):
"class that sets up a logger"
def setup(self,fname=''):
if fname=='':
_logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',
level=_logging.DEBUG,disable_existing_loggers=True)
else:
_logging.basicConfig(filename=fname,filemode='w',format='%(name)s - %(levelname)s - %(message)s',
level=_logging.DEBUG,disable_existing_loggers=False) #where filemode clobbers the file
lg = _logging.getLogger(__name__)
return lg
if __name__ == "__main__": #are we being run directly?
lg=_LogStart().setup()
#lg=meh.go()
# print __name__
#LogStart(args.inputdir+'asciplot_lc_katana'+args.fno + '.log',fout=True)
lg.info('moo')
#PUT worthwhile code here!
|
gpl-3.0
|
zorojean/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
244
|
9986
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# alpha=0 disables regularization, so this should match unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
|
bsd-3-clause
|
wilsonkichoi/zipline
|
zipline/utils/factory.py
|
3
|
10385
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Factory functions to prepare useful data.
"""
import pytz
import pandas as pd
import numpy as np
from datetime import timedelta
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource)
from zipline.finance.trading import (
SimulationParameters, TradingEnvironment, noop_load
)
from zipline.sources.test_source import create_trade
from zipline.data.loader import ( # For backwards compatibility
load_from_yahoo,
load_bars_from_yahoo,
)
__all__ = ['load_from_yahoo', 'load_bars_from_yahoo']
def create_simulation_parameters(year=2006, start=None, end=None,
capital_base=float("1.0e5"),
num_days=None,
data_frequency='daily',
emission_rate='daily',
env=None):
if env is None:
# Construct a complete environment with reasonable defaults
env = TradingEnvironment(load=noop_load)
if start is None:
start = pd.Timestamp("{0}-01-01".format(year), tz='UTC')
if end is None:
if num_days:
start_index = env.trading_days.searchsorted(start)
end = env.trading_days[start_index + num_days - 1]
else:
end = pd.Timestamp("{0}-12-31".format(year), tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=capital_base,
data_frequency=data_frequency,
emission_rate=emission_rate,
env=env,
)
return sim_params
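# Typical (hypothetical) usage:
#     sim_params = create_simulation_parameters(year=2006, num_days=10)
# which covers the first ten trading days of 2006 with a default
# noop-load TradingEnvironment.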
def get_next_trading_dt(current, interval, env):
next_dt = pd.Timestamp(current).tz_convert(env.exchange_tz)
while True:
# Convert the timestamp to naive before adding the interval; otherwise,
# when stepping over a daylight-saving boundary (EDT) an extra hour is added.
next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=env.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
if env.is_market_hours(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(env.exchange_tz)
return next_dt_utc
def create_trade_history(sid, prices, amounts, interval, sim_params, env,
source_id="test_factory"):
trades = []
current = sim_params.first_open
oneday = timedelta(days=1)
use_midnight = interval >= oneday
for price, amount in zip(prices, amounts):
if use_midnight:
trade_dt = current.replace(hour=0, minute=0)
else:
trade_dt = current
trade = create_trade(sid, price, amount, trade_dt, source_id)
trades.append(trade)
current = get_next_trading_dt(current, interval, env)
assert len(trades) == len(prices)
return trades
def create_dividend(sid, payment, declared_date, ex_date, pay_date):
div = Event({
'sid': sid,
'gross_amount': payment,
'net_amount': payment,
'payment_sid': None,
'ratio': None,
'declared_date': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
return div
def create_stock_dividend(sid, payment_sid, ratio, declared_date,
ex_date, pay_date):
return Event({
'sid': sid,
'payment_sid': payment_sid,
'ratio': ratio,
'net_amount': None,
'gross_amount': None,
'dt': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
def create_split(sid, ratio, date):
return Event({
'sid': sid,
'ratio': ratio,
'dt': date.replace(hour=0, minute=0, second=0, microsecond=0),
'type': DATASOURCE_TYPE.SPLIT,
'source_id': 'MockSplitSource'
})
def create_txn(sid, price, amount, datetime):
txn = Event({
'sid': sid,
'amount': amount,
'dt': datetime,
'price': price,
'type': DATASOURCE_TYPE.TRANSACTION,
'source_id': 'MockTransactionSource'
})
return txn
def create_commission(sid, value, datetime):
txn = Event({
'dt': datetime,
'type': DATASOURCE_TYPE.COMMISSION,
'cost': value,
'sid': sid,
'source_id': 'MockCommissionSource'
})
return txn
def create_txn_history(sid, priceList, amtList, interval, sim_params, env):
txns = []
current = sim_params.first_open
for price, amount in zip(priceList, amtList):
current = get_next_trading_dt(current, interval, env)
txns.append(create_txn(sid, price, amount, current))
current = current + interval
return txns
def create_returns_from_range(sim_params):
return pd.Series(index=sim_params.trading_days,
data=np.random.rand(len(sim_params.trading_days)))
def create_returns_from_list(returns, sim_params):
return pd.Series(index=sim_params.trading_days[:len(returns)],
data=returns)
def create_daily_trade_source(sids, sim_params, env, concurrent=False):
"""
Creates a source of daily trades for each sid in the sids list.
The first trade will be on sim_params.period_start, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
env=env,
concurrent=concurrent,
)
def create_minutely_trade_source(sids, sim_params, env, concurrent=False):
"""
Creates a source of minutely trades for each sid in the sids list.
The first trade will be on sim_params.period_start, and every minute
thereafter for each sid. Thus, two sids should result in two trades per
minute.
"""
return create_trade_source(
sids,
timedelta(minutes=1),
sim_params,
env=env,
concurrent=concurrent,
)
def create_trade_source(sids, trade_time_increment, sim_params, env,
concurrent=False):
# If the sim_params define an end that is during market hours, that will be
# used as the end of the data source
if env.is_market_hours(sim_params.period_end):
end = sim_params.period_end
# Otherwise, the last_close after the period_end is used as the end of the
# data source
else:
end = sim_params.last_close
args = tuple()
kwargs = {
'sids': sids,
'start': sim_params.first_open,
'end': end,
'delta': trade_time_increment,
'filter': sids,
'concurrent': concurrent,
'env': env,
}
source = SpecificEquityTrades(*args, **kwargs)
return source
def create_test_df_source(sim_params=None, env=None, bars='daily'):
if bars == 'daily':
freq = pd.datetools.BDay()
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
raise ValueError('%s bars not understood.' % bars)
if sim_params and bars == 'daily':
index = sim_params.trading_days
else:
if env is None:
env = TradingEnvironment(load=noop_load)
start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
days = env.days_in_range(start, end)
if bars == 'daily':
index = days
if bars == 'minute':
index = pd.DatetimeIndex([], freq=freq)
for day in days:
day_index = env.market_minutes_for_day(day)
index = index.append(day_index)
x = np.arange(1, len(index) + 1)
df = pd.DataFrame(x, index=index, columns=[0])
return DataFrameSource(df), df
def create_test_panel_source(sim_params=None, env=None, source_type=None):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
if env is None:
env = TradingEnvironment(load=noop_load)
index = env.days_in_range(start, end)
price = np.arange(0, len(index))
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'volume': volume,
'arbitrary': arbitrary},
index=index)
if source_type:
df['type'] = source_type
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
def create_test_panel_ohlc_source(sim_params, env):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
index = env.days_in_range(start, end)
price = np.arange(0, len(index)) + 100
high = price * 1.05
low = price * 0.95
open_ = price + .1 * (price % 2 - .5)
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'high': high,
'low': low,
'open': open_,
'volume': volume,
'arbitrary': arbitrary},
index=index)
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
|
apache-2.0
|
biocore/American-Gut
|
scripts/mod2_pcoa.py
|
3
|
13711
|
#!/usr/bin/env python
import os
import click
from matplotlib import use
use('Agg') # noqa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from skbio import read, DistanceMatrix
from skbio.stats import isubsample
from skbio.stats.ordination import OrdinationResults
from collections import defaultdict
from collections import OrderedDict
ALPHA = 1.0
LINE_WIDTH = 0.3
LINE_WIDTH_WHITE = 2.0
LINE_WIDTH_BLACK = 1.0
@click.group()
def mod2_pcoa():
pass
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--filename', required=True, type=str, help='Output filename')
@click.option('--sample', required=True, type=str,
help='The sample to print')
def body_site(coords, mapping_file, output, filename, sample):
"""Generates a bodysite figure for a sample in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, sep='\t', dtype=str)
mf.set_index('#SampleID', inplace=True)
mf = mf.loc[o.site_ids]
if sample not in o.site_ids:
raise ValueError("Sample %s not found" % sample)
color_hmp_fecal = sns.color_palette('Paired', 12)[10] # light brown
color_agp_fecal = sns.color_palette('Paired', 12)[11] # dark brown
color_hmp_oral = sns.color_palette('Paired', 12)[0] # light blue
color_agp_oral = sns.color_palette('Paired', 12)[1] # dark blue
color_hmp_skin = sns.color_palette('Paired', 12)[2] # light green
color_agp_skin = sns.color_palette('Paired', 12)[3] # dark green
grp_colors = {'AGP-FECAL': color_agp_fecal,
'AGP-ORAL': color_agp_oral,
'AGP-SKIN': color_agp_skin,
'HMP-FECAL': color_hmp_fecal,
'GG-FECAL': color_hmp_fecal,
'PGP-FECAL': color_hmp_fecal,
'HMP-ORAL': color_hmp_oral,
'PGP-ORAL': color_hmp_oral,
'HMP-SKIN': color_hmp_skin,
'PGP-SKIN': color_hmp_skin}
# plot categories as 50 slices with random zorder
for grp, color in grp_colors.iteritems():
sub_coords = c_df[mf.TITLE_BODY_SITE == grp].values
for i in np.array_split(sub_coords, 50):
if i.size == 0:
continue
plt.scatter(i[:, 0], i[:, 1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA, zorder=np.random.rand())
# plot participant's dot
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=250, edgecolor=np.asarray(
grp_colors[mf.loc[sample]['TITLE_BODY_SITE']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, filename)
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
@mod2_pcoa.command()
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help='Input distance matrix to subsample nearest sample')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--max', required=True, type=int,
help='Max number of samples per category value')
@click.option('--category', required=True, type=str,
help='The category to subsample in (likely COUNTRY)')
@click.option('--output', required=True, type=click.Path(exists=False,
writable=True, resolve_path=True), help='Output file')
def subsample_dm(distmat, mapping_file, max, category, output):
"""Subsample the distmat to max samples per category value"""
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
dtype=str)
mf.set_index('#SampleID', inplace=True)
id_to_cat = dict(mf[category])
def bin_f(x):
return id_to_cat.get(x)
dm = read(distmat, into=DistanceMatrix)
dm = dm.filter([id for _, id in isubsample(dm.ids, max, bin_f=bin_f)])
dm.to_file(output)
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--filename', required=True, type=str, help='Output filename')
@click.option('--sample', required=True, type=str,
help='The sample to print')
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help=('Input distance matrix to find nearest sample (if not '
'present in the coordinates'))
def country(coords, mapping_file, output, filename, sample, distmat):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
o_id_lookup = set(o.site_ids)
dm = read(distmat, into=DistanceMatrix)
dm_id_lookup = {i: idx for idx, i in enumerate(dm.ids)}
coord_samples_in_dm = {idx for idx, i in enumerate(dm.ids)
if i in o_id_lookup}
# we'll be computing min values, so we need to avoid catching the diagonal
np.fill_diagonal(dm._data, np.inf)
x, y = o.site[:, 0], o.site[:, 1]
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
dtype=str)
mf.set_index('#SampleID', inplace=True)
mf = mf.loc[o.site_ids]
if sample not in dm.ids:
raise ValueError("Sample %s not found" % sample)
color_Venezuela = sns.color_palette('Paired', 12)[10]
color_Malawi = sns.color_palette('Paired', 12)[1]
color_Western = sns.color_palette('Paired', 12)[4]
color_highlight = sns.color_palette('Paired', 12)[5]
color_no_data = (0.5, 0.5, 0.5)
grp_colors = OrderedDict()
grp_colors['no_data'] = color_no_data
grp_colors['Australia'] = color_Western
grp_colors['Belgium'] = color_Western
grp_colors['Canada'] = color_Western
grp_colors['China'] = color_Western
grp_colors['Finland'] = color_Western
grp_colors['France'] = color_Western
grp_colors['Germany'] = color_Western
grp_colors['Great Britain'] = color_Western
grp_colors['Ireland'] = color_Western
grp_colors['Japan'] = color_Western
grp_colors['Netherlands'] = color_Western
grp_colors['New Zealand'] = color_Western
grp_colors['Norway'] = color_Western
grp_colors['Scotland'] = color_Western
grp_colors['Spain'] = color_Western
grp_colors['Switzerland'] = color_Western
grp_colors['Thailand'] = color_Western
grp_colors['United Arab Emirates'] = color_Western
grp_colors['United Kingdom'] = color_Western
grp_colors['United States of America'] = color_Western
grp_colors['Malawi'] = color_Malawi
grp_colors['Venezuela'] = color_Venezuela
sample_to_plot = sample
if sample not in o_id_lookup:
# find the closest sample in the distance matrix that is in the
# coordinates data
closest_sample = None
for i in dm[dm_id_lookup[sample_to_plot]].argsort():
if i in coord_samples_in_dm:
closest_sample = dm.ids[i]
break
# this should not ever happen
if closest_sample is None:
raise ValueError("Unable to find a similar sample?")
sample_to_plot = closest_sample
# contour plot superimposed
sns.kdeplot(x, y, cmap='bone')
sns.set_context(rc={"lines.linewidth": 0.75})
# change participant's country's color to color_highlight unless
# country is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_highlight
# plot each country except participant's according to colors above
for grp, color in grp_colors.iteritems():
if grp == mf.loc[sample_to_plot]['COUNTRY']:
continue
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# now plot participant's country
grp = mf.loc[sample_to_plot]['COUNTRY']
color = grp_colors[grp]
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# plot participant's dot
plt.scatter(c_df.loc[sample_to_plot][0], c_df.loc[sample_to_plot][1],
color=color_highlight,
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample_to_plot][0], c_df.loc[sample_to_plot][1],
color=color_highlight,
s=250, edgecolor=np.asarray(grp_colors[
mf.loc[sample_to_plot]['COUNTRY']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
# reset participant's country's color to color_Western unless country
# is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_Western
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, filename)
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--color', required=True, type=str,
help='Metadata category to set color by')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--filename', required=True, type=str, help='Output filename')
@click.option('--sample', required=True, type=str,
help='The sample to print')
def gradient(coords, mapping_file, color, output, filename, sample):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
dtype=str)
mf.set_index('#SampleID', inplace=True)
mf = mf.loc[o.site_ids]
mf[color] = mf[color].convert_objects(convert_numeric=True)
if sample not in o.site_ids:
raise ValueError("Sample %s not found" % sample)
numeric = mf[~pd.isnull(mf[color])]
non_numeric = mf[pd.isnull(mf[color])]
color_array = plt.cm.RdBu(numeric[color]/max(numeric[color]))
# plot numeric metadata as colored gradient
ids = numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c=numeric[color], cmap=plt.get_cmap('RdBu'),
alpha=ALPHA, lw=LINE_WIDTH, edgecolor=color_array*0.6)
# plot non-numeric metadata as gray
ids = non_numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c='0.5', alpha=ALPHA, lw=LINE_WIDTH, edgecolor='0.3')
# plot individual's dot
try:
color_index = numeric.index.tolist().index(sample)
except ValueError:
color_index = None
if color_index is None:
_color = (0.5, 0.5, 0.5)
else:
_color = color_array[color_index]
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=270, edgecolor='w', lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=250, edgecolor=np.asarray(_color)*0.6,
lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, filename)
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
if __name__ == '__main__':
mod2_pcoa()
|
bsd-3-clause
|
foreversand/QSTK
|
Examples/Basic/tutorial2.py
|
6
|
2979
|
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 24, 2013
@author: Sourabh Bajaj
@contact: [email protected]
@summary: Example tutorial code.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def main():
''' Main Function'''
# Reading the csv file.
na_data = np.loadtxt('example-data.csv', delimiter=',', skiprows=1)
na_price = na_data[:, 3:] # Default np.loadtxt datatype is float.
na_dates = np.int_(na_data[:, 0:3]) # Dates should be int
ls_symbols = ['$SPX', 'XOM', 'GOOG', 'GLD']
# Printing the first 5 rows
print "First 5 rows of Price Data:"
print na_price[:5, :]
print
print "First 5 rows of Dates:"
print na_dates[:5, :]
# Creating the timestamps from dates read
ldt_timestamps = []
for i in range(0, na_dates.shape[0]):
ldt_timestamps.append(dt.date(na_dates[i, 0],
na_dates[i, 1], na_dates[i, 2]))
# Plotting the prices with x-axis=timestamps
plt.clf()
plt.plot(ldt_timestamps, na_price)
plt.legend(ls_symbols)
plt.ylabel('Adjusted Close')
plt.xlabel('Date')
plt.savefig('adjustedclose.pdf', format='pdf')
# Normalizing the prices to start at 1 and see relative returns
na_normalized_price = na_price / na_price[0, :]
# Plotting the prices with x-axis=timestamps
plt.clf()
plt.plot(ldt_timestamps, na_normalized_price)
plt.legend(ls_symbols)
plt.ylabel('Normalized Close')
plt.xlabel('Date')
plt.savefig('normalized.pdf', format='pdf')
# Copy the normalized prices to a new ndarray to find returns.
na_rets = na_normalized_price.copy()
# Calculate the daily returns of the prices. (Inplace calculation)
tsu.returnize0(na_rets)
# Plotting the plot of daily returns
plt.clf()
plt.plot(ldt_timestamps[0:50], na_rets[0:50, 0]) # $SPX 50 days
plt.plot(ldt_timestamps[0:50], na_rets[0:50, 1]) # XOM 50 days
plt.axhline(y=0, color='r')
plt.legend(['$SPX', 'XOM'])
plt.ylabel('Daily Returns')
plt.xlabel('Date')
plt.savefig('rets.pdf', format='pdf')
# Plotting the scatter plot of daily returns between XOM VS $SPX
plt.clf()
plt.scatter(na_rets[:, 0], na_rets[:, 1], c='blue')
plt.ylabel('XOM')
plt.xlabel('$SPX')
plt.savefig('scatterSPXvXOM.pdf', format='pdf')
# Plotting the scatter plot of daily returns between $SPX VS GLD
plt.clf()
plt.scatter(na_rets[:, 0], na_rets[:, 3], c='blue') # $SPX v GLD
plt.ylabel('GLD')
plt.xlabel('$SPX')
plt.savefig('scatterSPXvGLD.pdf', format='pdf')
if __name__ == '__main__':
main()
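# Illustrative sketch (added for clarity, not part of the original tutorial):
# this is the computation that tsu.returnize0 is assumed to perform above,
# i.e. ret[t] = price[t] / price[t - 1] - 1 with the first row set to 0. The
# helper name is hypothetical and it is never called in this script.
def _daily_returns_sketch(na_price):
    na_ret = na_price.copy()
    na_ret[1:, :] = na_price[1:, :] / na_price[:-1, :] - 1.0
    na_ret[0, :] = 0.0
    return na_ret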
|
bsd-3-clause
|
0asa/scikit-learn
|
sklearn/cluster/__init__.py
|
12
|
1331
|
"""
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, Ward, WardAgglomeration,
AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'Ward',
'WardAgglomeration',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
|
bsd-3-clause
|
RPGOne/scikit-learn
|
examples/applications/wikipedia_principal_eigenvector.py
|
50
|
7817
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
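# Illustrative note (added for clarity): given raw redirects A -> B and
# B -> C, the transitive-closure loop above rewrites the map to A -> C and
# B -> C, so every article name resolves to its final target in one lookup.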
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
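# Minimal sanity check (illustrative addition, not part of the original
# example): on a tiny 3-node chain 0 -> 1 -> 2, node 2 should end up with the
# highest centrality score, since all link mass eventually flows into it.
X_toy = sparse.csr_matrix(np.array([[0., 1., 0.],
                                    [0., 0., 1.],
                                    [0., 0., 0.]], dtype=np.float32))
toy_scores = centrality_scores(X_toy, max_iter=30, tol=1e-10)
print("toy chain centrality scores:", toy_scores)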
|
bsd-3-clause
|
pratapvardhan/scikit-image
|
skimage/viewer/utils/core.py
|
19
|
6555
|
import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
from ..._shared.utils import warn
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
"""Initialize QAppliction.
The QApplication needs to be initialized before creating any QWidgets
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap, which doesn't
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
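# Usage sketch (illustrative, kept as a comment so nothing runs at import
# time): a red overlay whose opacity ramps linearly from fully transparent to
# half opaque could be built as
#   overlay_cmap = ClearColormap((1.0, 0.0, 0.0), max_alpha=0.5)
#   ax.imshow(mask, cmap=overlay_cmap)
# where `mask` and `ax` are a user-supplied array and matplotlib axes.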
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
fig_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
This sets the image plot's array and updates its shape appropriately
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
|
bsd-3-clause
|
marioyc/CCA-images-text
|
main/tag_to_image.py
|
1
|
2613
|
from gensim.models import word2vec
from pycocotools.coco import COCO
from scipy.spatial import distance
from sklearn.externals import joblib
import logging
import numpy as np
import os
import pickle
import time
import features
logging.basicConfig(filename='cca.log', format='%(asctime)s %(message)s', level=logging.INFO)
annFile = 'annotations/instances_val2014.json'
coco_instances = COCO(annFile)
ids = coco_instances.getAnnIds()
annotations = coco_instances.loadAnns(ids)
categories = coco_instances.loadCats(coco_instances.getCatIds())
category_name = {}
for cat in categories:
category_name[ cat['id'] ] = cat['name']
img_info = {}
img_categories = {}
logging.info('Testing: get info for all images')
for ann in annotations:
image_id = ann['image_id']
if image_id not in img_info:
img_info[image_id] = coco_instances.imgs[image_id]
img_categories[image_id] = set()
category = category_name[ ann['category_id'] ]
img_categories[image_id].add(category)
assert os.path.isfile('projections.npz')
projections = np.load('projections.npz')
pca = projections['pca'].item()
W_img = projections['W_img']
W_tag = projections['W_tag']
if not os.path.isfile('test_features.npz'):
img_ids, img_features = features.calc_testing_image_features(img_info, pca, W_img)
else:
test_features = np.load('test_features.npz')
img_ids = test_features['img_ids']
img_features = test_features['img_features']
N_IMGS = len(img_ids)
model = word2vec.Word2Vec.load_word2vec_format('text.model.bin', binary=True)
W_tag = projections['W_tag']
assert os.path.isfile('possible_tags.pkl')
possible_tags = pickle.load(open('possible_tags.pkl', 'rb'))
N_RESULTS = 50
tags = [cat['name'] for cat in categories]
f = open('t2i_results.txt', 'w')
for tag in tags:
if tag not in possible_tags:
f.write(tag + ' is not in the list of possible tags\n')
continue
f.write('TAG: ' + tag + '\n')
features = model[tag]
features = np.dot(features, W_tag)
scores = np.zeros(N_IMGS)
for i in range(N_IMGS):
scores[i] = distance.euclidean(img_features[i,:], features)
index = np.argsort(scores)
correct = 0
for i in range(N_RESULTS):
ind = index[i]
image_id = img_ids[ind]
info = img_info[image_id]
#f.write(info['flickr_url'] + ' ' + info['coco_url'] + '\n')
#for cat in img_categories[image_id]:
# f.write(cat + ', ')
#f.write('\n')
if tag in img_categories[image_id]:
correct += 1
f.write('Precision = {0:.2f}\n\n'.format(float(correct) / N_RESULTS))
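# Vectorized alternative (illustrative sketch, not used above): the per-image
# distance loop could be replaced by a single cdist call, assuming
# `img_features` has shape (N_IMGS, d) and `features` has shape (d,):
#   scores = distance.cdist(img_features, features.reshape(1, -1)).ravel()
#   index = np.argsort(scores)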
|
mit
|
YuncyYe/ml
|
mlf/oneVersusAll_hard.py
|
1
|
7007
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import random
import math
##############################################
def sepLine(w, x):
return -((w[0]+w[1]*x)/w[2])
#end
def drawSepLine(w, minX, maxX):
sepx = range(minX, maxX)
sepy = []
for e in sepx:
tmp = sepLine(w, e)
sepy.append( tmp )
#end for
plt.plot(sepx, sepy )
#end drawSepLine
##############################################
"""
Get the predicted label of sample x with respect to weight vector w.
Note: x already includes the constant (bias) item.
The return value is 1 or -1; if 1, then x belongs to the class of w.
"""
def genLabel(x, w):
t = np.inner(w, x)
ty = np.sign(t)
return ty;
#emd
##############################################
#diamond
gdiamond=np.array([
[1.0, 12.0, 1.],
[1.5, 12.5, 1.],
[3.5, 11.5, 1.],
[4.5, 14.0, 1.],
[5.5, 16.0, 1.],
[6.0, 11.5, 1.],
[7.0, 10.5, 1.]
])
#rectangle
grectangle=np.array([
[9.5, 13.0, 2.],
[10.0, 11.5, 2.],
[10.5, 11.5, 2.],
[11.0, 13.0, 2.],
[12.0, 12.0, 2.],
[12.5, 12.5, 2.],
[13.0, 11.0, 2.],
[14.0, 10.0, 2.],
[15.0, 10.5, 2.],
[15.5, 10.6, 2.]
])
#triangle
gtriangle=np.array([
[1.0, 2.5, 3.],
[2.0, 6.0, 3.],
[3.0, 2.0, 3.],
[3.0, 5.0, 3.],
[4.0, 2.2, 3.],
[4.0, 5.5, 3.],
[6.0, 2.0, 3.],
[6.0, 5.5, 3.],
[6.5, 2.0, 3.],
[6.7, 0.5, 3.]
])
#star
gstar=np.array([
[9.5, 8.5, 4.],
[10.0, 1.5, 4.],
[11.0, 6.0, 4.],
[7.7, 6.0, 4.],
[8.0, 4.5, 4.],
[8.2, 4.0, 4.],
[9.0, 1.5, 4.],
[9.0, 4.5, 4.],
[9.5, 5.0, 4.],
[11.0, 1.5, 4.],
])
grtd = np.concatenate((gdiamond,grectangle, gtriangle, gstar))
gminX = (int)(np.min(grtd[:,:1]))-3
gmaxX = (int)(np.max(grtd[:,:1]))+3
gminY = np.min(grtd[:,1:2])-3
gmaxY = np.max(grtd[:,1:2])+3
grtestData = np.array([
[15.0, 15.0, 2.],
[13.0, 4.0, 4.],
[8.0, 8.0, 0.],
[10.0, 9.0, 0.],
[1.5, 7.0, 13.],
[2.0, 6.0, 13.],
[16.0, 7.0, 24.],
])
###plot the data
plt.xlim( (gminX, gmaxX) )
plt.ylim( (gminY, gmaxY) )
plt.plot(gdiamond[:,:1], gdiamond[:, 1:2], '.')
plt.plot(grectangle[:,:1], grectangle[:, 1:2], '1')
plt.plot(gtriangle[:,:1], gtriangle[:, 1:2], '+')
plt.plot(gstar[:,:1], gstar[:, 1:2], '*')
################
"""
Here we use the cyclic PLA to perform binary classification between two classes.
"""
def cyclic_pla(td):
x0 = np.zeros( (len(td), 1) )
x0[:]=1.0
td = np.concatenate( (x0, td[:,:1], td[:,1:2], td[:,2:3]), 1 )
    #This is the initial value of w. td[0] includes y, so we need to subtract 1.
w=np.zeros( len(td[0])-1 );
#
#ensure all point corret
stage=0;
while(True):
stage = stage+1;
#print("stage "+str(stage), w );
pass
isModifing=False;
#check each point for w
for idx in range(len(td)):
sample = td[idx]
sx = sample[:len(sample)-1]; sy=sample[len(sample)-1]
t = np.inner(w, sx)
ty = np.sign(t)
#print(idx, ty, sy)
if(ty!=sy):
#failed, we need to update w
#print("In stage "+str(stage)+".we need to update w ", w);
print(idx, ty, sy)
w = w + sy*sx
isModifing = True
#end if
#end for
print("The w is ", w)
if(isModifing==False):
break;
#end while
return w
#end
################
"""
Here we use the pocket algorithm to perform binary classification between two classes.
"""
def pocket(td):
    #This is the initial value of w. td[0] includes y, so we need to subtract 1.
w=np.zeros( len(td[0])-1 );
#todo:we can set it as max of float
weighOfPocket=1000000000.0
    wPocket=w #w in pocket, i.e. the current best w
#
    #try to get all points classified correctly
maxIter=900000
maxIter=1200000
maxIter=42000
weighOfPocketThres=0.05
#calc weight for w
def calWeight(w, td):
weight=0.;
for idx in range(len(td)):
sample = td[idx]
sx = sample[:len(sample)-1]; sy=sample[len(sample)-1]
t = np.inner(w, sx)
ty = np.sign(t)
#print(idx, ty, sy)
if(ty!=sy):
weight += 1.0;
#end for
return weight;
#end
curIter=0
while(curIter<maxIter):
curIter = curIter +1;
#pick up an element in sample to try to improve w
rndIdx=random.randint(0, len(td)-1)
sample = td[rndIdx]
sx = sample[:len(sample)-1]; sy=sample[len(sample)-1]
t = np.inner(w, sx)
ty = np.sign(t)
print(rndIdx, ty, sy)
if(ty!=sy):
#failed, we need to update w
w = w + sy*sx
#print("The w is ", w, sy, sx)
weight = calWeight(w, td)
#if the new w is better than stuff in pocket, then update stuff in pocket
if(weight<weighOfPocket):
weighOfPocket = weight
wPocket = w
#end if
if(weighOfPocket<weighOfPocketThres):
break;
#end if
#print("The curIter is ", curIter)
print("The weighOfPocket is ", weighOfPocket)
print("The w is ", w)
#drawSepLine(w, gminX, gmaxX)
#end while
return wPocket;
#end
################
"""
For each element of td whose y is not equal to label, set y to -1; this turns
the training data into a one-versus-all problem for that label.
Note: the value must be set to -1 rather than 0, to stay consistent with the
sign-based formula used above.
"""
def formOneVesusAll(td, label):
ntd = td.copy()
labelIdx = len(ntd[0])-1
for e in ntd:
if(e[labelIdx]!=label):
e[labelIdx]=-1 #IMPORTANT
else:
e[labelIdx]=1 #IMPORTANT
#end
return ntd
#end
"""
Use the one-versus-all scheme to calculate a weight vector w for each label,
and store all of them in ws.
"""
def oneVersusAllHard(td, ws):
pass;
    labels=[1,2,3,4] #we could get this from rtd[:,2:3]; we just skip that here
for label in labels:
nrtd = formOneVesusAll(td, label);
        #w=cyclic_pla(nrtd) # does not work here: the one-versus-all data need not be linearly separable, so cyclic PLA may never converge
w = pocket(nrtd)
ws.append(w)
print("w for label ", label, " is ", w)
pass;
#end for
#end
################
#add constant two the training data
x0 = np.zeros( (len(grtd), 1) )
x0[:]=1.0
gtd = np.concatenate( (x0, grtd[:,:1], grtd[:,1:2], grtd[:,2:3]), 1 )
gw=[];
oneVersusAllHard(gtd, gw);
#plot the line
for w in gw:
print("w :", w)
drawSepLine(w, gminX, gmaxX)
#end for
#gw : 1, 2, 3, 4
#label: 1, 2, 3, 4
#plot test data
plt.plot(grtestData[:,:1], grtestData[:, 1:2], '_')
#update the test data
xt0 = np.zeros( (len(grtestData), 1) )
xt0[:]=1.0
gtestData = np.concatenate( (xt0, grtestData[:,:1], grtestData[:,1:2], grtestData[:,2:3]), 1 )
#test
for e in gtestData:
x = e[:len(e)-1]; y=e[len(e)-1]
msg = "For "+str(x)+" expented label:"+str(y)+", actual:"
for w in gw:
actualY=genLabel(x, w)
msg += str(actualY) + ";";
#end for
print(msg)
#end for
pass
################
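# Illustrative extension (hypothetical helper, not part of the original
# script): the hard one-versus-all scheme above can report +1 for several
# classes, or for none. A common way to resolve this is to score x against
# every weight vector and pick the label with the largest inner product.
def predictOneVersusAllMax(x, ws, labels):
    scores = [np.inner(w, x) for w in ws]
    return labels[int(np.argmax(scores))]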
|
apache-2.0
|
sinhrks/scikit-learn
|
sklearn/utils/graph.py
|
289
|
6239
|
"""
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source]  # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
        next_level = set()  # and start a new set (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
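# Worked example (added for illustration): for the undirected 3-node path
# graph with adjacency
#   A = [[0, 1, 0],
#        [1, 0, 1],
#        [0, 1, 0]]
# the degrees are (1, 2, 1), so graph_laplacian(A) returns D - A:
#   [[ 1, -1,  0],
#    [-1,  2, -1],
#    [ 0, -1,  1]]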
|
bsd-3-clause
|
vmaini/reinforcement_learning
|
q_learning_nn.py
|
1
|
2715
|
# Source: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
tf.reset_default_graph()
render = False # set to True to visualize the game
'''
NETWORK ARCHITECTURE
X [1,16] -- input, one-hot state vector
W [16,4] B [4]
Q_out [1x4] -- Q-values for each output
'''
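# The training target computed below follows the standard Q-learning (Bellman)
# update: Q_target(s, a) = r + discount_rate * max_a' Q(s', a'),
# where s' is the next state returned by env.step and r is its reward.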
X = tf.placeholder(shape = [1,16],dtype=tf.float32)
W = tf.Variable(tf.random_uniform([16,4],0,0.01))
Q_out = tf.matmul(X,W)
predict = tf.argmax(Q_out, axis=1)
# calculate loss
Q_next = tf.placeholder(shape=[1,4],dtype=tf.float32)
loss = tf.reduce_sum(tf.square(Q_next-Q_out))
trainer = tf.train.GradientDescentOptimizer(learning_rate = 0.1)
train_step = trainer.minimize(loss)
# hyperparameters
discount_rate = 0.99
epsilon = 0.1
num_episodes = 3000
j_list = []
rewards_list = []
running_reward = 0
running_rewards = []
# train the network
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in xrange(num_episodes):
s = env.reset()
reward_sum = 0
done = False
j = 0
while j < 99:
if render: env.render()
j += 1
# choose action greedily, with epsilon chance of random action
a, Q_all = sess.run([predict, Q_out],feed_dict={X:np.identity(16)[s:s+1]}) # passes in one-hot state vector as input
if np.random.rand(1) < epsilon:
a[0] = env.action_space.sample()
s1, reward, done, info = env.step(a[0])
# get max(Q'(s',a')) from Bellman equation to set our target value for chosen action
Q1 = sess.run(Q_out, feed_dict={X:np.identity(16)[s1:s1+1]})
max_Q1 = np.max(Q1)
Q_target = Q_all
Q_target[0,a[0]] = reward + discount_rate * max_Q1
        # train network with target & prediction, update reward sum & state
info, W1 = sess.run([train_step,W],feed_dict={X:np.identity(16)[s:s+1],Q_next:Q_target})
reward_sum += reward
s = s1
if done:
epsilon = 1./((i/50)+10)
break
j_list.append(j)
rewards_list.append(reward_sum)
running_reward += reward_sum
if i % 100 == 0:
running_rewards.append(running_reward)
print "epoch %d reward: %d" % (i/100, running_reward)
running_reward = 0
print "average success rate: " + str(sum(rewards_list)/num_episodes * 100) + "%"
print "best epoch: " + str(max(running_rewards)) + "%"
# plt.plot(j_list, color='g', label='num steps per episode')
plt.plot(running_rewards, color='g', label='running reward')
plt.legend()
plt.show()
|
mit
|
faroit/mir_eval
|
mir_eval/segment.py
|
1
|
43678
|
# CREATED:2013-08-13 12:02:42 by Brian McFee <[email protected]>
'''
Evaluation criteria for structural segmentation fall into two categories:
boundary annotation and structural annotation. Boundary annotation is the task
of predicting the times at which structural changes occur, such as when a verse
transitions to a refrain. Metrics for boundary annotation compare estimated
segment boundaries to reference boundaries. Structural annotation is the task
of assigning labels to detected segments. The estimated labels may be
arbitrary strings - such as A, B, or C - and they need not describe functional
concepts. Metrics for structural annotation are similar to those used for
clustering data.
Conventions
-----------
Both boundary and structural annotation metrics require two dimensional arrays
with two columns, one for boundary start times and one for boundary end times.
Structural annotation further requires lists of reference and estimated segment
labels whose length equals the number of rows in the
corresponding list of boundary edges. In both tasks, we assume that
annotations express a partitioning of the track into intervals. The function
:func:`mir_eval.util.adjust_intervals` can be used to pad or crop the segment
boundaries to span the duration of the entire track.
Metrics
-------
* :func:`mir_eval.segment.detection`: An estimated boundary is considered
correct if it falls within a window around a reference boundary
* :func:`mir_eval.segment.deviation`: Computes the median absolute time
difference from a reference boundary to its nearest estimated boundary, and
vice versa
* :func:`mir_eval.segment.pairwise`: For classifying pairs of sampled time
instants as belonging to the same structural component
* :func:`mir_eval.segment.rand_index`: Clusters reference and estimated
annotations and compares them by the Rand Index
* :func:`mir_eval.segment.ari`: Computes the Rand index, adjusted for chance
* :func:`mir_eval.segment.nce`: Interprets sampled reference and estimated
labels as samples of random variables :math:`Y_R, Y_E` from which the
conditional entropy of :math:`Y_R` given :math:`Y_E` (Under-Segmentation) and
:math:`Y_E` given :math:`Y_R` (Over-Segmentation) are estimated
* :func:`mir_eval.segment.mutual_information`: Computes the standard,
normalized, and adjusted mutual information of sampled reference and
estimated segments
'''
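# Illustrative example of the convention described above (added for clarity,
# not part of the original module): a track split into two segments would be
# expressed as
#   intervals = np.array([[0.0, 5.0], [5.0, 11.0]])
#   labels = ['A', 'B']
# i.e. one row of (start, end) times per segment and one label per row.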
import collections
import warnings
import numpy as np
import scipy.stats
import scipy.sparse
import scipy.misc
import scipy.special
from . import util
def validate_boundary(reference_intervals, estimated_intervals, trim):
"""Checks that the input annotations to a segment boundary estimation
metric (i.e. one that only takes in segment intervals) look like valid
segment times, and throws helpful errors if not.
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : bool
will the start and end events be trimmed?
"""
if trim:
# If we're trimming, then we need at least 2 intervals
min_size = 2
else:
# If we're not trimming, then we only need one interval
min_size = 1
if len(reference_intervals) < min_size:
warnings.warn("Reference intervals are empty.")
if len(estimated_intervals) < min_size:
warnings.warn("Estimated intervals are empty.")
for intervals in [reference_intervals, estimated_intervals]:
util.validate_intervals(intervals)
def validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels):
"""Checks that the input annotations to a structure estimation metric (i.e.
one that takes in both segment boundaries and their labels) look like valid
segment times and labels, and throws helpful errors if not.
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
"""
for (intervals, labels) in [(reference_intervals, reference_labels),
(estimated_intervals, estimated_labels)]:
util.validate_intervals(intervals)
if intervals.shape[0] != len(labels):
raise ValueError('Number of intervals does not match number '
'of labels')
# Check only when intervals are non-empty
if intervals.size > 0:
# Make sure intervals start at 0
if not np.allclose(intervals.min(), 0.0):
raise ValueError('Segment intervals do not start at 0')
if reference_intervals.size == 0:
warnings.warn("Reference intervals are empty.")
if estimated_intervals.size == 0:
warnings.warn("Estimated intervals are empty.")
# Check only when intervals are non-empty
if reference_intervals.size > 0 and estimated_intervals.size > 0:
if not np.allclose(reference_intervals.max(),
estimated_intervals.max()):
raise ValueError('End times do not match')
def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
"""Boundary detection hit-rate.
    A hit is counted whenever a reference boundary is within ``window`` of an
    estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5,
... trim=True)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
window : float > 0
        size of the window of 'correctness' around ground-truth boundaries
(in seconds)
(Default value = 0.5)
beta : float > 0
weighting constant for F-measure.
(Default value = 1.0)
trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
(Default value = False)
Returns
-------
precision : float
precision of estimated predictions
recall : float
recall of reference reference boundaries
f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``)
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
def deviation(reference_intervals, estimated_intervals, trim=False):
"""Compute the median deviations between reference
and estimated boundary times.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
... est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
(Default value = False)
Returns
-------
reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference
def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
"""Frame-clustering segmentation evaluation by pair-wise agreement.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
precision : float > 0
Precision of detecting whether frames belong in the same cluster
recall : float > 0
Recall of detecting whether frames belong in the same cluster
f : float > 0
F-measure of detecting whether frames belong in the same cluster
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
def rand_index(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
"""(Non-adjusted) Rand index.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> rand_index = mir_eval.structure.rand_index(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
rand_index : float > 0
Rand index
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
# Find where they agree
matches_pos = np.logical_and(agree_ref, agree_est)
# Find where they disagree
matches_neg = np.logical_and(~agree_ref, ~agree_est)
n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
n_matches_neg = matches_neg.sum() / 2.0
rand = (n_matches_pos + n_matches_neg) / n_pairs
return rand
def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray()
def _adjusted_rand_index(reference_indices, estimated_indices):
"""Compute the Rand index, adjusted for change.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
"""
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0 or
(ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.misc.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.misc.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.misc.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.misc.comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return ((sum_comb - prod_comb)/(mean_comb - prod_comb))
def ari(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
"""Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,
... est_intervals, est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
Returns
-------
ari_score : float > 0
Adjusted Rand index between segmentations.
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
"""Compute the mutual information between two sequence labelings, adjusted for
chance.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ami : float <= 1.0
Mutual information
.. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
and sklearn.metrics.cluster.expected_mutual_info_score
"""
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
# The following code is based on
# sklearn.metrics.cluster.expected_mutual_information
R, C = contingency.shape
N = float(n_samples)
a = np.sum(contingency, axis=1).astype(np.int32)
b = np.sum(contingency, axis=0).astype(np.int32)
# There are three major terms to the EMI equation, which are multiplied to
# and then summed over varying nij values.
# While nijs[0] will never be used, having it simplifies the indexing.
nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
    # Stops divide-by-zero warnings. As it's not used, this is not an issue.
nijs[0] = 1
# term1 is nij / N
term1 = nijs / N
# term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)
# term2 uses the outer product
log_ab_outer = np.log(np.outer(a, b))
# term2 uses N * nij
log_Nnij = np.log(N * nijs)
    # term3 is large, and involves many factorials. Calculate these in log
# space to stop overflows.
gln_a = scipy.special.gammaln(a + 1)
gln_b = scipy.special.gammaln(b + 1)
gln_Na = scipy.special.gammaln(N - a + 1)
gln_Nb = scipy.special.gammaln(N - b + 1)
gln_N = scipy.special.gammaln(N + 1)
gln_nij = scipy.special.gammaln(nijs + 1)
# start and end values for nij terms for each summation.
start = np.array([[v - N + w for w in b] for v in a], dtype='int')
start = np.maximum(start, 1)
end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
# emi itself is a summation over the various values.
emi = 0
for i in range(R):
for j in range(C):
for nij in range(start[i, j], end[i, j]):
term2 = log_Nnij[nij] - log_ab_outer[i, j]
# Numerators are positive, denominators are negative.
gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] -
gln_N - gln_nij[nij] -
scipy.special.gammaln(a[i] - nij + 1) -
scipy.special.gammaln(b[j] - nij + 1) -
scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
term3 = np.exp(gln)
emi += (term1[nij] * term2 * term3)
# Calculate entropy for each labeling
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def _normalized_mutual_info_score(reference_indices, estimated_indices):
"""Compute the mutual information between two sequence labelings, adjusted for
chance.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
nmi : float <= 1.0
Normalized mutual information
.. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score
"""
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def mutual_information(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
"""Frame-clustering segmentation: mutual information metrics.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> mi, ami, nmi = mir_eval.structure.mutual_information(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
Returns
-------
MI : float > 0
Mutual information between segmentations
AMI : float
Adjusted mutual information between segmentations.
NMI : float > 0
        Normalized mutual information between segmentations
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Mutual information
mutual_info = _mutual_info_score(y_ref, y_est)
# Adjusted mutual information
adj_mutual_info = _adjusted_mutual_info_score(y_ref, y_est)
# Normalized mutual information
norm_mutual_info = _normalized_mutual_info_score(y_ref, y_est)
return mutual_info, adj_mutual_info, norm_mutual_info
def nce(reference_intervals, reference_labels, estimated_intervals,
estimated_labels, frame_size=0.1, beta=1.0):
"""Frame-clustering segmentation: normalized conditional entropy
Computes cross-entropy of cluster assignment, normalized by the
max-entropy.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> S_over, S_under, S_F = mir_eval.structure.nce(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta for F-measure
(Default value = 1.0)
Returns
-------
S_over
Over-clustering score:
``1 - H(y_est | y_ref) / log(|y_est|)``
If `|y_est|==1`, then `S_over` will be 0.
S_under
Under-clustering score:
``1 - H(y_ref | y_est) / log(|y_ref|)``
If `|y_ref|==1`, then `S_under` will be 0.
S_F
F-measure for (S_over, S_under)
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Make the contingency table: shape = (n_ref, n_est)
contingency = _contingency_matrix(y_ref, y_est).astype(float)
# Normalize by the number of frames
contingency = contingency / len(y_ref)
# Compute the marginals
p_est = contingency.sum(axis=0)
p_ref = contingency.sum(axis=1)
# H(true | prediction) = sum_j P[estimated = j] *
# sum_i P[true = i | estimated = j] log P[true = i | estimated = j]
# entropy sums over axis=0, which is true labels
# The following scipy.stats.entropy calls are equivalent to
# scipy.stats.entropy(contingency, base=2)
# However the `base` kwarg has only been introduced in scipy 0.14.0
true_given_est = p_est.dot(scipy.stats.entropy(contingency) / np.log(2))
pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T) / np.log(2))
score_under = 0.0
if contingency.shape[0] > 1:
score_under = 1. - true_given_est / np.log2(contingency.shape[0])
score_over = 0.0
if contingency.shape[1] > 1:
score_over = 1. - pred_given_ref / np.log2(contingency.shape[1])
f_measure = util.f_measure(score_over, score_under, beta=beta)
return score_over, score_under, f_measure
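# A minimal sketch of the scores above (toy annotations assumed here; the
# helper is defined but never called): comparing an annotation against itself
# gives zero conditional entropy in both directions, so both scores and their
# F-measure should be 1.
def _nce_usage_sketch():
    iv = np.array([[0.0, 10.0], [10.0, 20.0]])
    labels = ['A', 'B']
    return nce(iv, labels, iv, labels)  # expected: (1.0, 1.0, 1.0)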
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
"""Compute all metrics for the given reference and estimated annotations.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,
... est_intervals, est_labels)
Parameters
----------
ref_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
ref_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
est_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
est_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Adjust timespan of estimations relative to ground truth
ref_intervals, ref_labels = \
util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
est_intervals, est_labels = \
util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
t_max=ref_intervals.max())
# Now compute all the metrics
scores = collections.OrderedDict()
# Boundary detection
# Force these values for window
kwargs['window'] = .5
scores['[email protected]'], scores['[email protected]'], scores['[email protected]'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
kwargs['window'] = 3.0
scores['[email protected]'], scores['[email protected]'], scores['[email protected]'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
# Boundary deviation
scores['Ref-to-est deviation'], scores['Est-to-ref deviation'] = \
util.filter_kwargs(deviation, ref_intervals, est_intervals, **kwargs)
# Pairwise clustering
(scores['Pairwise Precision'],
scores['Pairwise Recall'],
scores['Pairwise F-measure']) = util.filter_kwargs(pairwise,
ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Rand index
scores['Rand Index'] = util.filter_kwargs(rand_index, ref_intervals,
ref_labels, est_intervals,
est_labels, **kwargs)
# Adjusted rand index
scores['Adjusted Rand Index'] = util.filter_kwargs(ari, ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Mutual information metrics
(scores['Mutual Information'],
scores['Adjusted Mutual Information'],
scores['Normalized Mutual Information']) = \
util.filter_kwargs(mutual_information, ref_intervals, ref_labels,
est_intervals, est_labels, **kwargs)
# Conditional entropy metrics
scores['NCE Over'], scores['NCE Under'], scores['NCE F-measure'] = \
util.filter_kwargs(nce, ref_intervals, ref_labels, est_intervals,
est_labels, **kwargs)
return scores
|
mit
|
jlegendary/scikit-learn
|
examples/linear_model/plot_ols_3d.py
|
350
|
2040
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
|
bsd-3-clause
|
michaelbramwell/sms-tools
|
lectures/06-Harmonic-model/plots-code/f0-TWM-errors-1.py
|
22
|
3586
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackman
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
def TWM (pfreq, pmag, maxnpeaks, f0c):
    # Two-way mismatch algorithm for f0 detection (by Beauchamp & Maher)
    # pfreq, pmag: peak frequencies in Hz and magnitudes; maxnpeaks: maximum number of peaks used
    # f0c: frequencies of the f0 candidates
# returns f0: fundamental frequency detected
p = 0.5 # weighting by frequency value
q = 1.4 # weighting related to magnitude of peaks
r = 0.5 # scaling related to magnitude of peaks
rho = 0.33 # weighting of MP error
Amax = max(pmag) # maximum peak magnitude
harmonic = np.matrix(f0c)
ErrorPM = np.zeros(harmonic.size) # initialize PM errors
MaxNPM = min(maxnpeaks, pfreq.size)
for i in range(0, MaxNPM) : # predicted to measured mismatch error
difmatrixPM = harmonic.T * np.ones(pfreq.size)
difmatrixPM = abs(difmatrixPM - np.ones((harmonic.size, 1))*pfreq)
FreqDistance = np.amin(difmatrixPM, axis=1) # minimum along rows
peakloc = np.argmin(difmatrixPM, axis=1)
Ponddif = np.array(FreqDistance) * (np.array(harmonic.T)**(-p))
PeakMag = pmag[peakloc]
MagFactor = 10**((PeakMag-Amax)/20)
ErrorPM = ErrorPM + (Ponddif + MagFactor*(q*Ponddif-r)).T
harmonic = harmonic+f0c
ErrorMP = np.zeros(harmonic.size) # initialize MP errors
MaxNMP = min(10, pfreq.size)
for i in range(0, f0c.size) : # measured to predicted mismatch error
nharm = np.round(pfreq[:MaxNMP]/f0c[i])
nharm = (nharm>=1)*nharm + (nharm<1)
FreqDistance = abs(pfreq[:MaxNMP] - nharm*f0c[i])
Ponddif = FreqDistance * (pfreq[:MaxNMP]**(-p))
PeakMag = pmag[:MaxNMP]
MagFactor = 10**((PeakMag-Amax)/20)
ErrorMP[i] = sum(MagFactor * (Ponddif + MagFactor*(q*Ponddif-r)))
Error = (ErrorPM[0]/MaxNPM) + (rho*ErrorMP/MaxNMP) # total error
f0index = np.argmin(Error) # get the smallest error
f0 = f0c[f0index] # f0 with the smallest error
return f0, ErrorPM, ErrorMP, Error
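# A quick sketch of TWM on ideal synthetic peaks (assumed values, defined but
# never called): five exact harmonics of 440 Hz with equal magnitudes should
# make the 440 Hz candidate the one with the smallest total error.
def twm_sketch():
    pfreq = 440.0 * np.arange(1, 6)      # ideal harmonic peak frequencies
    pmag = np.zeros(5)                   # equal peak magnitudes (0 dB)
    f0c = np.arange(100.0, 1500.0, 1.0)  # fundamental frequency candidates
    f0, _, _, _ = TWM(pfreq, pmag, 5, f0c)
    return f0                            # expected to be close to 440 Hz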
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 1024
hN = N/2
M = 801
t = -40
start = int(.8*fs)                          # analysis start: 0.8 s into the file, in samples
minf0 = 100
maxf0 = 1500
w = blackman (M)
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs * iploc/N
f0cand = np.arange(minf0, maxf0, 1.0)
maxnpeaks = 10
f0, ErrorPM, ErrorMP, Error = TWM (ipfreq, ipmag, maxnpeaks, f0cand)
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 7))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([100,5100,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(f0cand,ErrorPM[0], 'b', label = 'ErrorPM', lw=1.2)
plt.plot(f0cand,ErrorMP, 'g', label = 'ErrorMP', lw=1.2)
plt.plot(f0cand,Error, color='black', label = 'Error Total', lw=1.5)
plt.axis([minf0,maxf0,min(Error),130])
plt.legend()
plt.title('TWM Errors')
plt.tight_layout()
plt.savefig('f0-TWM-errors-1.png')
plt.show()
|
agpl-3.0
|
lbraginsky/Branching
|
FastBranching.py
|
1
|
7062
|
'''
Created on Sep 3, 2017
@author: lbraginsky
'''
import numpy as np
class Branching(object):
@staticmethod
def sparsity_fitness(gen, diameter):
from scipy import spatial
tree = spatial.cKDTree(gen)
f = lambda p: len(tree.query_ball_point(p, r=0.5*diameter))
return np.apply_along_axis(f, 1, gen)
fitness_functions = {
"euclidean": lambda gen: np.sqrt(np.sum(gen**2, axis=1)),
"sumsq": lambda gen: np.sum(gen**2, axis=1),
"sumabs": lambda gen: np.sum(abs(gen), axis=1),
"maxabs": lambda gen: np.max(abs(gen), axis=1),
"absprod": lambda gen: np.prod(abs(gen), axis=1),
"sparsity(100)": (lambda gen: Branching.sparsity_fitness(gen, 100), True)
}
def __init__(self, branch_prob, fitness, initial_population, max_size):
self.branch_prob = branch_prob
self.set_fitness(fitness)
self.max_size = max_size
self.members = initial_population
self.generation = 0
def set_fitness(self, fitness):
f = Branching.fitness_functions[fitness]
try:
self.fitness_fun, self.low_fit = f
except TypeError:
self.fitness_fun, self.low_fit = f, False
self.fitness = fitness
def step(self):
# Everybody moves
gen = self.members + np.random.normal(size=self.members.shape)
# Everybody branches with luck
ind = np.random.uniform(size=gen.shape[0]) < self.branch_prob
gen = np.append(gen, gen[ind], axis=0)
# Sort by fitness
ind = np.argsort(self.fitness_fun(gen))
if not self.low_fit: ind = ind[::-1]
# Truncate to max size
# New generation becomes members
self.members = gen[ind][:self.max_size]
self.generation += 1
def run(self, num_steps):
for _i in range(num_steps):
self.step()
def stats(self):
f = self.fitness_fun(self.members)
return {"gen": self.generation, "count": len(f), "min": np.min(f), "max": np.max(f), "avg": np.mean(f), "std": np.std(f)}
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
class Simulation(object):
def __init__(self, branching, steps_per_update=1):
ndim = branching.members.shape[1]
if not ndim in [1, 2, 3]:
raise NotImplementedError("Cannot display {}-dimensions".format(ndim))
self.branching = branching
self.steps_per_update = steps_per_update
self.ndim = ndim
self.running = False
self.ax_lims = [(-50, 50)] * ndim
self.scatter = [self.scatter_d1, self.scatter_d2, self.scatter_d3][ndim-1]
self.fig = plt.figure(figsize=(9, 8))
self.ax = self.plot()
self.setup_controls()
def scaling(self):
members = self.branching.members
d_lims = list(zip(np.min(members, axis=0), np.max(members, axis=0)))
if any(d[0] < a[0] or d[1] > a[1] for a, d in zip(self.ax_lims, d_lims)):
r = max(b - a for a, b in d_lims) * 0.75
for d in range(len(d_lims)):
c = sum(d_lims[d])/2
self.ax_lims[d] = (c - r, c + r)
def plot(self):
        ax = self.fig.add_subplot(111, projection='3d' if self.ndim == 3 else None)
ax.set_autoscale_on(False)
        if self.ndim == 1: ax.get_yaxis().set_visible(False)
return ax
def scatter_d1(self):
x = [x[0] for x in self.branching.members]
y = [0 for _v in x]
self.ax.scatter(x, y, marker='.')
self.ax.set_xlim(self.ax_lims[0])
def scatter_d2(self):
x, y = zip(*self.branching.members)
self.ax.scatter(x, y, marker='.', s=10)
self.ax.set_xlim(self.ax_lims[0])
self.ax.set_ylim(self.ax_lims[1])
self.ax.annotate("Gen: {}".format(self.branching.generation),
xy=(1, 0), xycoords='axes fraction',
xytext=(-10, 10), textcoords='offset pixels',
horizontalalignment='right',
verticalalignment='bottom')
def scatter_d3(self):
x, y, z = zip(*self.branching.members)
self.ax.scatter(x, y, z, s=5)
self.ax.set_xlim(self.ax_lims[0])
self.ax.set_ylim(self.ax_lims[1])
self.ax.set_zlim(self.ax_lims[2])
def setup_controls(self):
from matplotlib.widgets import Button, RadioButtons, Slider
self.g_branch_prob = Slider(plt.axes([0.1, 0.03, 0.1, 0.02]), 'Branch\nprob', 0, 1,
valinit=self.branching.branch_prob, valfmt='%.2f')
self.g_branch_prob.on_changed(self.update_branch_prob)
self.g_max_size = Slider(plt.axes([0.3, 0.03, 0.1, 0.02]), 'Max\nsize', 1, 5,
valinit=np.log10(self.branching.max_size), valfmt='%.0f')
self.g_max_size.valtext.set_text(self.branching.max_size)
self.g_max_size.on_changed(self.update_max_size)
self.g_fitness = RadioButtons(plt.axes([0.5, 0.01, 0.15, 0.13]), Branching.fitness_functions, self.branching.fitness)
self.g_fitness.on_clicked(self.update_fitness)
self.g_steps = Slider(plt.axes([0.8, 0.03, 0.1, 0.02]), 'Steps per\nupdate', 0, 3,
valinit=np.log10(self.steps_per_update), valfmt='%.0f')
self.g_steps.valtext.set_text(self.steps_per_update)
self.g_steps.on_changed(self.update_steps)
self.g_tgl = Button(plt.axes([0.92, 0.1, 0.07, 0.04]), "Stop/Go")
self.g_tgl.on_clicked(self.toggle_running)
def update_branch_prob(self, event):
self.branching.branch_prob = self.g_branch_prob.val
def update_max_size(self, event):
self.branching.max_size = int(10**self.g_max_size.val)
self.g_max_size.valtext.set_text(self.branching.max_size)
def update_fitness(self, label):
self.branching.set_fitness(label)
def update_steps(self, event):
self.steps_per_update = int(10**self.g_steps.val)
self.g_steps.valtext.set_text(self.steps_per_update)
def toggle_running(self, event):
if self.running:
self.ani.event_source.stop()
else:
self.ani.event_source.start()
self.running = not self.running
def update(self, i):
import time
t = time.time()
self.branching.run(self.steps_per_update)
self.ax.clear()
self.scaling()
self.scatter()
print("{}, time: {:.2}".format(self.branching.stats(), time.time() - t))
def run(self):
self.ani = animation.FuncAnimation(self.fig, self.update, interval=1, init_func=lambda: None)
self.running = True
plt.show()
ndim = 2
simulation = Simulation(Branching(branch_prob=0.05,
fitness="euclidean",
initial_population=np.zeros(shape=(1, ndim)),
max_size=1000),
steps_per_update=1)
simulation.run()
|
gpl-3.0
|
mcquay239/cg-lectures
|
mesh.py
|
1
|
4847
|
#!/usr/bin/env python
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from math import sqrt, fabs
from pyhull.convex_hull import ConvexHull
class Pole:
__slots__ = ("r", "c")
def __init__(self, r, c):
self.r = r
self.c = c
class Triangle:
__slots__ = ("v0", "v1", "incident_vertex")
def __init__(self, v0, incident_vertex):
self.v1 = None
self.v0 = v0
self.incident_vertex = incident_vertex
def add_simplex(self, v1):
assert self.v1 is None
self.v1 = v1
def circumscribed_circle(X, Y, Z, W=None):
if W is None:
W = X ** 2 + Y ** 2 + Z ** 2
O = np.ones(4)
Dx = + np.linalg.det([W, Y, Z, O])
Dy = - np.linalg.det([W, X, Z, O])
Dz = + np.linalg.det([W, X, Y, O])
a = np.linalg.det([X, Y, Z, O])
c = np.linalg.det([W, X, Y, Z])
C = np.array([Dx, Dy, Dz]) / 2 / a
d = Dx ** 2 + Dy ** 2 + Dz ** 2 - 4 * a * c
if d < 0:
return [0, 0, 0], -1
r = sqrt(d) / 2 / fabs(a)
return C, r
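# A small sanity sketch (test points assumed here; defined but never called):
# the circumscribed sphere of (0,0,0), (1,0,0), (0,1,0), (0,0,1) should come
# out centered at (0.5, 0.5, 0.5) with radius sqrt(3)/2.
def _circumsphere_sketch():
    X = np.array([0.0, 1.0, 0.0, 0.0])
    Y = np.array([0.0, 0.0, 1.0, 0.0])
    Z = np.array([0.0, 0.0, 0.0, 1.0])
    return circumscribed_circle(X, Y, Z)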
def draw_sphere(C, r, ax):
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = r * np.outer(np.cos(u), np.sin(v)) + C[0]
y = r * np.outer(np.sin(u), np.sin(v)) + C[1]
z = r * np.outer(np.ones(np.size(u)), np.cos(v)) + C[2]
ax.plot_surface(x, y, z, alpha=0.2, rstride=4, cstride=4, color='b', linewidth=0.5)
def main():
X, Y, Z = np.transpose(np.reshape(np.fromfile("resources/bun_zipper.xyz", sep=" "), (-1, 3)))
# X, Y, Z = np.transpose(np.reshape(np.fromfile("resources/dragon_vrip.xyz", sep=" "), (-1, 3)))
W = X ** 2 + Y ** 2 + Z ** 2
O = np.ones(4)
ch = ConvexHull(np.transpose([X, Y, Z, W]))
simplices = [s for s in ch.vertices if np.linalg.det([X[s], Y[s], Z[s], O]) < 0]
# for s in simplices:
# C, r = circumscribed_circle(X[s], Y[s], Z[s], W[s])
# for i in range(len(X)):
# if not i in s:
# v = [X[i], Y[i], Z[i]]
# if np.linalg.norm(v - C) < r:
# print(s, i, np.linalg.norm(v - C), r)
poles = {}
triangles = {}
for s in simplices:
C, r = circumscribed_circle(X[s], Y[s], Z[s], W[s])
for v in s:
if v not in poles or poles[v].r < r:
poles[v] = Pole(r, C)
for a, b, c, d in (0, 1, 2, 3), (0, 2, 3, 1), (0, 1, 3, 2), (1, 2, 3, 0):
t_idx = tuple(sorted((s[a], s[b], s[c])))
if t_idx in triangles:
triangles[t_idx].add_simplex(C)
else:
triangles[t_idx] = Triangle(C, s[d])
Nx, Ny, Nz = np.transpose([(np.array(poles[i].c) - np.array([X[i], Y[i], Z[i]])) / poles[i].r
if i in poles else [0, 0, 1] for i in range(len(X))])
def intersection_check(triangle, vertex, f, echo=False):
v = np.array([X[vertex], Y[vertex], Z[vertex]])
vn = np.array([Nx[vertex], Ny[vertex], Nz[vertex]])
v0 = np.array(triangle.v0)
d0 = np.dot(vn, (v0 - v) / np.linalg.norm(v0 - v))
if triangle.v1 is None:
idx = list(f)
p0, p1, p2 = np.transpose([X[idx], Y[idx], Z[idx]])
vp1, vp2 = p1 - p0, p2 - p0
ov = np.array([t[triangle.incident_vertex] for t in (X, Y, Z)])
pr0, pr1 = np.linalg.det([vp1, vp2, v0 - p0]), np.linalg.det([vp1, vp2, ov - p0])
if pr0 * pr1 >= 0:
return True
return -0.38 < d0 < 0.38
v1 = np.array(triangle.v1)
d1 = np.dot(vn, (v1 - v) / np.linalg.norm(v1 - v))
d0, d1 = sorted((d0, d1))
if echo:
print(d0, d1)
if d1 <= -0.38 or d0 >= 0.38:
return False
return True
candidate_triangles = {idx: triangle for idx, triangle in triangles.items()
if all(intersection_check(triangle, v, idx) for v in idx)}
with open("test.off", "w") as out:
out.write("OFF\n")
out.write("{} {} 0\n".format(len(X), len(candidate_triangles)))
for v in zip(X, Y, Z):
out.write("{} {} {}\n".format(*v))
for f, trg in candidate_triangles.items():
idx = list(f)
p0, p1, p2 = np.transpose([X[idx], Y[idx], Z[idx]])
if trg.v1 is None:
ov = np.array([t[trg.incident_vertex] for t in (X, Y, Z)])
if np.linalg.det([p1 - p0, p2 - p0, ov - p0]) > 0:
idx = reversed(idx)
else:
n = [sum(Nx[idx]), sum(Ny[idx]), sum(Nz[idx])]
if np.linalg.det([p1 - p0, p2 - p0, n]) < 0:
idx = reversed(idx)
out.write("3 {} {} {}\n".format(*idx))
if __name__ == "__main__":
main()
|
mit
|
ajylee/gpaw-rtxs
|
doc/exercises/band_structure/Si_guc.py
|
2
|
1475
|
from gpaw import GPAW
from ase.structure import bulk
from ase.dft.kpoints import ibz_points, get_bandpath
import numpy as np
si = bulk('Si', 'diamond', a=5.459)
if 1:
k = 6
calc = GPAW(kpts=(k, k, k),
xc='PBE')
si.set_calculator(calc)
e = si.get_potential_energy()
efermi = calc.get_fermi_level()
calc.write('Si-gs.gpw')
else:
efermi = GPAW('Si-gs.gpw', txt=None).get_fermi_level()
points = ibz_points['fcc']
G = points['Gamma']
X = points['X']
W = points['W']
K = points['K']
L = points['L']
kpts, x, X = get_bandpath([W, L, G, X, W, K], si.cell)
print(len(kpts), len(x), len(X))
point_names = ['W', 'L', '\Gamma', 'X', 'W', 'K']
if 1:
calc = GPAW('Si-gs.gpw',
kpts=kpts,
fixdensity=True,
usesymm=None,#False,
basis='dzp',
convergence=dict(nbands=8))
e = calc.get_atoms().get_potential_energy()
calc.write('Si-bs.gpw')
calc = GPAW('Si-bs.gpw', txt=None)
import matplotlib.pyplot as plt
e = np.array([calc.get_eigenvalues(k) for k in range(len(kpts))])
e -= efermi
emin = e.min() - 1
emax = e[:, :8].max() + 1
for n in range(8):
plt.plot(x, e[:, n])
for p in X:
plt.plot([p, p], [emin, emax], 'k-')
plt.xticks(X, ['$%s$' % n for n in point_names])
plt.axis(xmin=0, xmax=X[-1], ymin=emin, ymax=emax)
plt.xlabel('k-vector')
plt.ylabel('Energy (eV)')
plt.title('PBE bandstructure of Silicon')
plt.savefig('Si-bs.png')
plt.show()
|
gpl-3.0
|
Fireblend/scikit-learn
|
sklearn/datasets/mldata.py
|
309
|
7838
|
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
    Keyword arguments allow adapting these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
|
bsd-3-clause
|
abhishekgahlot/scikit-learn
|
examples/exercises/plot_iris_exercise.py
|
323
|
1602
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
|
bsd-3-clause
|
Vincentyao1995/Globalink2017-UBC
|
David_Zhu/test4.py
|
1
|
18688
|
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pylab as plt
from scipy.interpolate import interp1d
from scipy import signal
from scipy import optimize
'''
The LRTable lists the left/right window boundaries of the spectrum bands taken from the library.
'''
ComplexLRTable = [
[[22, 72, 138, 219, 298, 432, 472],
[62, 136, 219, 298, 401, 460, 489], ],
[[32, 147, 226, 315, 454],
[147, 226, 315, 450, 505], ],
[[54, 90, 143, 218, 312, 463],
[86, 131, 218, 312, 402, 494], ],
# 432--442 seems not important
# [[54, 90, 143, 218, 312, 432, 463],
# [86, 131, 218, 312, 402, 442, 494],],
[[16, 147, 224, 307, 426, 473],
[138, 224, 307, 421, 449, 485], ]
]
LRTable = [
[[138, 219, 298, ],
[219, 298, 401, ], ],
[[147, 226, 315],
[226, 315, 450], ],
[[143, 218, 312, ],
[218, 312, 402, ], ],
[[147, 224, 307, ],
[224, 307, 421, ], ]
]
# Finding the envelope of the spectrum
def qhull(sample):
'''
    Finding the envelope of the spectrum
    qhull(
        sample: numpy.ndarray of [wavelength, spectrum]
)
'''
link = lambda a, b: np.concatenate((a, b[1:]))
edge = lambda a, b: np.concatenate(([a], [b]))
def dome(sample, base):
h, t = base
dists = np.dot(sample - h, np.dot(((0, -1), (1, 0)), (t - h)))
outer = np.repeat(sample, dists > 0, axis=0)
if len(outer):
pivot = sample[np.argmax(dists)]
return link(dome(outer, edge(h, pivot)),
dome(outer, edge(pivot, t)))
else:
return base
if len(sample) > 2:
axis = sample[:, 0]
base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)
return link(dome(sample, base),
dome(sample, base[::-1]))
else:
return sample
def changehull(hull, name_list):
'''
change the hull to fit all the data within [0-1] by considering the endpoints.
:param hull:
:param name_list:
:return:
'''
c = 0
for index in range(len(hull)):
if hull[index][0] == name_list[-1]:
c = index
break
hull = hull[:c + 1]
hull = np.vstack((hull, hull[0]))
return hull
def inputData4Lib(testnumber):
'''
    Input the standard library of the 4 REE spectra: VNIR 396 nm -- 1003 nm.
:return:
'''
# Read the file and store in pandas
df = pd.read_excel('nameslib4data.xlsx', sheetname='Sheet1')
col_name = df.columns
name_list = list(col_name)
# xbegin = name_list.index(396.329987)
xbegin = name_list.index(600.820007)
wavelengths = np.array(name_list[xbegin:])
col_value = df.values
value_list = list(col_value)
spectrum = col_value[testnumber][xbegin:]
return wavelengths, spectrum, name_list
def inputsampledata():
'''
    Input the small rock sample data, starting from wavelength 600.820007.
    :return: wavelengths, col_length, col_value, xbegin
'''
headers = list(['ID', 'X', 'Y'])
with open('header.txt', 'r') as f:
data = f.readlines()
for line in data:
spec = line.split(sep=',')
headers.extend(map(float, spec))
# headers.extend(spec)
name_list = list(headers)
xbegin = name_list.index(600.820007)
# df = pd.read_table('Box120_VNIR_sample1.txt', header=None, skiprows=[0, 1, 2, 3, 4, 5, 6, 7], sep='\s+')
df = pd.read_table('VNIR_sample1_25points.txt', header=None, skiprows=[0, 1, 2, 3, 4, 5, 6, 7], sep='\s+')
# print (df.head())
wavelengths = np.array(name_list[xbegin:])
col_value = df.values
value_list = list(col_value)
# global col_length
col_length = len(value_list)
# spectrum = col_value[testnumber][xbegin:]
return wavelengths, col_length, col_value, xbegin
def hull_to_spectrum(hull, wavelengths):
f = interp1d(hull['wavelength'], hull['intensity'])
return f(wavelengths)
def callibpeaknum(testnumber):
'''
    Calculate the peak information of the library spectrum.
    :param testnumber: index of the spectrum in the library
    :return: peakcounts, the number of peaks in each window
             peaklist, the peak indices of each window
             peakheights, the peak heights of each window
             extermeminpeaks, the extreme minimum peak of each window
'''
wavelengths, spectrum, name_list = inputData4Lib(testnumber)
sample = np.array([wavelengths, spectrum]).T
hull = qhull(sample)
hull = changehull(hull, name_list)
hull = pd.DataFrame(hull, columns=['wavelength', 'intensity'])
hull_spectrum = hull_to_spectrum(hull[:-1], wavelengths)
spectrum2 = spectrum / hull_spectrum
newspectrum = signal.savgol_filter(spectrum2, 17, 4, deriv=0)
# get the peak values of the minimum
peakind_tuple = signal.argrelmin(newspectrum)
peakind = peakind_tuple[0][:]
# print ('peakind:', peakind)
newspectrum2 = signal.savgol_filter(spectrum2, 17, 4, deriv=1)
# get the 1 derivative value of spectrum
peakargmin = signal.argrelmin(newspectrum2)
peakargmax = signal.argrelmax(newspectrum2)
lvalue = LRTable[testnumber][0]
rvalue = LRTable[testnumber][1]
# print ('lvalue:', lvalue)
# print ('rvalue:', rvalue)
peakcounts = list()
peaklist = list()
peakheights = list()
plotpeak = list()
extermeminpeaks = list()
for index in np.arange(len(lvalue)):
peaks = [val for val in peakind[peakind > lvalue[index]] if val in peakind[peakind < rvalue[index]]]
# print ('peaks:', peaks)
extermeminpeak_temp = np.argmin(newspectrum[peaks])
extermeminpeak = peaks[extermeminpeak_temp]
extermeminpeaks.append(extermeminpeak)
plotpeak.extend(peaks)
peakcounts.append(len(peaks))
peaklist.append(np.array(peaks))
predict_val = (newspectrum[rvalue[index]] + newspectrum[lvalue[index]]) / 2
peakheights.append(np.array(np.repeat(predict_val, len(peaks)) - newspectrum[peaks]))
def printval():
print('Peakcounts:', peakcounts)
print('Peaklist:', peaklist)
print('peakheights:', peakheights)
print('extermeminpeaks:', extermeminpeaks)
def showlipic():
plt.figure()
plt.title('The Library Spectrum:')
plt.plot(wavelengths, spectrum2, c='k', label='Removed Spectrum')
plt.scatter(wavelengths[plotpeak], spectrum2[plotpeak], c='r', label='Found peaks')
plt.scatter(wavelengths[lvalue], spectrum2[lvalue], c='g', label='Left endpoints')
plt.scatter(wavelengths[rvalue], spectrum2[rvalue], c='b', label='right endpoints')
plt.legend()
plt.figure()
plt.plot(wavelengths, spectrum2, c='k', label='Removed Spectrum')
plt.scatter(wavelengths[plotpeak], spectrum2[plotpeak], c='r', label='Found peaks')
plt.scatter(wavelengths[peakargmax], spectrum2[peakargmax], c='g', label='peakargmax endpoints')
plt.scatter(wavelengths[peakargmin], spectrum2[peakargmin], c='b', label='peakargmin endpoints')
plt.legend()
plt.show()
# printval()
# showlibpic()
return peakcounts, peaklist, peakheights, extermeminpeaks
def isPeakExist(libnumber, rockspectrum, libspectrum):
'''
    Judge whether the expected peaks exist by comparing the rock spectrum with the library spectrum
    :param libnumber: the library spectrum number
    :param rockspectrum: the rock spectrum
    :param libspectrum: the library spectrum
    :return: whether the peaks exist, the average peak distance, the scaling parameter and the RMS
'''
# print('Calculate the information of the library peaks:')
libpeakcounts, libpeaklist, libpeakheights, libextermeminpeaks = callibpeaknum(libnumber)
lbands = LRTable[libnumber][0]
rbands = LRTable[libnumber][1]
# smoothrockspectrum = signal.savgol_filter(rockspectrum, 33, 4, deriv=0)
smoothrockspectrum = rockspectrum
rockpeakind_tuple = signal.argrelmin(smoothrockspectrum)
rockpeakind = rockpeakind_tuple[0][:]
judgethreshold = 12
judgeflag = False
distance = 0
liballpeaks = list()
libminpeakdepth = list()
predrockmin = list()
predrockdepth = list()
rockmin = list()
for index in np.arange(len(lbands)):
# get each window of the library and rock spectrum.
band = smoothrockspectrum[lbands[index]: rbands[index]]
rockbandpeakind = [num for num in rockpeakind if (num > lbands[index] and num < rbands[index])]
# print('rockbandpeakind:', rockbandpeakind)
if len(rockbandpeakind) == 0:
print('peaks not exists')
return False, 0, 0, 0
# get the library peaks index
libpeak = libpeaklist[index]
liballpeaks.extend(libpeak)
# Use the shift of the peak distance to calculate the similarity
libmin = libextermeminpeaks[index]
# print ('libmin:', libmin)
depth_temp = (libspectrum[lbands[index]] + libspectrum[rbands[index]]) / 2 - libspectrum[libmin]
libminpeakdepth.append(depth_temp)
# get the really minimum peaks in rock and calculate distance use the really minimum values.
minval = min([smoothrockspectrum[item] for item in rockbandpeakind])
for item in rockbandpeakind:
if smoothrockspectrum[item] == minval:
distance += abs(libmin - item)
rockmin.append(item)
# get the minimum peaks from the rocks and calculate distance use the nearby one peak values.
# distance += min([abs(libmin - item) for item in rockbandpeakind])
# print('distance:', distance)
# Calculate the value which close to the peaks seems to be the extreme min values of the band
minval = min([abs(libmin - item) for item in rockbandpeakind])
for item in rockbandpeakind:
if abs(libmin - item) == minval:
predrockmin.append(item)
depth = (rockspectrum[lbands[index]] + rockspectrum[rbands[index]]) / 2 - rockspectrum[item]
predrockdepth.append(depth)
# print ('predrockdepth:', predrockdepth)
# Calculate depth of the original rock peaks
rockdepth = []
for peakindex in np.arange(len(rockbandpeakind)):
depth = (rockspectrum[lbands[index]] + rockspectrum[rbands[index]]) / 2 - rockspectrum[
rockbandpeakind[peakindex]]
rockdepth.append(depth)
# print ('rockdepth:', rockdepth)
# print('predrockmin: ', predrockmin)
# print ('predrockdepth: ', predrockdepth)
    # Try1: Only use the minimum value to see whether it works (the peak closest to this minimum is used for the comparison)
def calscalling_extrememindepth():
print('Try1: Calculate Scalling with only the exterme minimum depth:')
scaling = 1.0
# print ('libminpeakdepth:', libminpeakdepth)
# print ('predrockdepth:', predrockdepth)
def residuals(p, libminpeakdepth, predrockdepth):
result = 0
for item in np.arange(len(libminpeakdepth)):
result += abs(libminpeakdepth[item] * p - predrockdepth[item])
return result
plsq = optimize.leastsq(residuals, scaling, args=(libminpeakdepth, predrockdepth))
scaling = plsq[0]
rms = residuals(scaling, libminpeakdepth, predrockdepth)
print('Scaling: %f, RMS: %f' % (scaling, rms))
# calscalling_extrememindepth()
    # Try2: Use the same index to calculate depth (the depths are taken at the same indices as in the library)
def calscalling_costantmindepth():
print('Try2: Calculate Scalling with only the constant depth:')
scaling = 1.0
# print ('libextermeminpeaks:', libextermeminpeaks)
# print ('libdepth:', libminpeakdepth)
def residuals(p, libextermeminpeaks):
result = 0
for item in np.arange(len(libextermeminpeaks)):
rockdepth = (smoothrockspectrum[lbands[item]] + smoothrockspectrum[rbands[item]]) / 2 - \
smoothrockspectrum[libextermeminpeaks[item]]
libdepth = libminpeakdepth[item]
result += abs(rockdepth - p * libdepth)
return result
plsq = optimize.leastsq(residuals, scaling, args=(libextermeminpeaks))
scaling = plsq[0]
rms = residuals(scaling, libextermeminpeaks)
print('Scaling: %f, RMS: %f' % (scaling, rms))
# calscalling_costantmindepth()
weight = [0.45, 0.45, 0.1]
    # Try3: Use multiple library peaks; the peak indices are treated as constant since they are not far from the library ones.
def calscaling_multiconstant_mindepth():
# print('Try3: Calculate Scalling with the multiply constant minimum depth:')
scaling = 1.0
# print ('libpeaklist:', libpeaklist)
def residuals(p, libpeaklist):
result = 0
for item in np.arange(len(libpeaklist)):
for t in libpeaklist[item]:
rockdepth = (smoothrockspectrum[lbands[item]] + smoothrockspectrum[rbands[item]]) / 2 - \
smoothrockspectrum[t]
libdepth = (libspectrum[lbands[item]] + libspectrum[rbands[item]]) / 2 - libspectrum[t]
result += abs(rockdepth - p * libdepth) * weight[item]
return result
plsq = optimize.leastsq(residuals, scaling, args=(libpeaklist))
scaling = plsq[0]
rms = residuals(scaling, libpeaklist)
# print ('Scaling: %f, RMS: %f' %(scaling, rms))
return scaling, rms
# calscaling_multiconstant_mindepth()
    # Try4: Use the several library peaks closest to the minimum for matching and fitting. The indices are not fully fixed, but not fully free either: first locate the lowest point, then shift the original library peaks by the relative offset before matching and fitting.
def calscaling_multivariable_mindepth():
print('Try4: Calculate Scalling with the multiply variable minimum depth:')
scaling = 1.0
# print ('libpeaklist:', libpeaklist)
# print ('libextermeminpeaks:', libextermeminpeaks)
# print ('predrockmin:', predrockmin)
def residuals(p, libextermeminpeaks, predrockmin, libpeaklist):
result = 0
for index in np.arange(len(libextermeminpeaks)):
distance = libextermeminpeaks[index] - predrockmin[index]
for t in libpeaklist[index]:
rockdepth = (smoothrockspectrum[lbands[index]] + smoothrockspectrum[rbands[index]]) / 2 - \
smoothrockspectrum[t - distance]
libdepth = (libspectrum[lbands[index]] + libspectrum[rbands[index]]) / 2 - libspectrum[t]
result += abs(rockdepth - p * libdepth)
return result
plsq = optimize.leastsq(residuals, scaling, args=(libextermeminpeaks, predrockmin, libpeaklist))
scaling = plsq[0]
rms = residuals(scaling, libextermeminpeaks, predrockmin, libpeaklist)
print('Scaling: %f, RMS: %f' % (scaling, rms))
# calscaling_multivariable_mindepth()
def calscalling_samebandsweights():
        # use the same index to calculate depth, without considering different weights for the bands
print('liballpeaks:', liballpeaks)
scaling = [1.0]
def residuals(p, liballpeaks):
result = 0
for item in liballpeaks:
# should use the depth to calculate the results.
result += abs(smoothrockspectrum[item] - p * libspectrum[item])
return result
plsq = optimize.leastsq(residuals, scaling, args=(liballpeaks))
scaling = plsq[0]
rms = residuals(scaling, liballpeaks)
print('Scaling: %f, RMS: %f' % (scaling, rms))
averagedistance = distance / len(lbands)
print('averagedistance:', averagedistance)
scaling, rms = calscaling_multiconstant_mindepth()
if averagedistance < judgethreshold:
# print ('Peak Exists.')
return True, averagedistance, scaling, rms
else:
# print ('Peak not exist.')
return False, averagedistance, scaling, rms
# Future design considerations: different bands may need different weights; within the same band, weights could be set according to peak height; and it remains to be decided which band is more important.
def calscallingwithgaussian():
pass
def calscallingwithsignaldepth():
pass
def getspectrum2(wavelengths, spectrum, name_list):
'''
Get the removed spectrum from the original spectrum
:param testnumber: from the library
:return: return the removed spectrum
'''
sample = np.array([wavelengths, spectrum]).T
hull = qhull(sample)
hull = changehull(hull, name_list)
# print (hull)
hull = pd.DataFrame(hull, columns=['wavelength', 'intensity'])
hull_spectrum = hull_to_spectrum(hull[:-1], wavelengths)
spectrum2 = spectrum / hull_spectrum
return spectrum2
def tottest(rockspectrum):
# rocknumber = int(input('Please input the rock test number:'))
libnumber = 0
wavelengths, libspectrum, libname_list = inputData4Lib(libnumber)
rocksmoothspectrum = signal.savgol_filter(rockspectrum, 131, 4, deriv=0)
# plt.figure()
# plt.plot(wavelengths, rocksmoothspectrum, label='original spectrum')
#
# plt.legend()
# plt.show()
libspectrum2 = getspectrum2(wavelengths, libspectrum, libname_list)
rockspectrum2 = getspectrum2(wavelengths, rocksmoothspectrum, libname_list)
# Calculate the lib peak information.
# print('Calculate the rock peak information:')
# calrockpeaknum(libnumber, rocknumber)
judgeflag, averagedistance, scaling, rms = isPeakExist(libnumber, rockspectrum2, libspectrum2)
return judgeflag, averagedistance, scaling, rms
# plt.show()
if __name__ == "__main__":
    # if the scaling value is less than 0, we can also say the peak does not exist.
wavelengths, col_length, col_value, xbegin = inputsampledata()
with open('Box120_VNIR_sample1_output.txt', 'wt') as f:
for number in np.arange(col_length):
rockspectrum = col_value[number][xbegin:]
flag, averagedistance, scaling, rms = tottest(rockspectrum)
if (flag == True):
print('Number %d has peaks.' % number)
print('Number %d has peaks.' % number, file=f)
print ('Averagedistance %.3f' %averagedistance, file=f)
print('Scaling: %.3f; RMS: %.3f\n' % (scaling, rms), file=f)
else:
                print('Number %d does not have peaks.' % number)
|
mit
|
bastibl/gnuradio
|
gr-digital/python/digital/test_soft_decisions.py
|
7
|
5089
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy, sys
from matplotlib import pyplot
from gnuradio import digital
from .soft_dec_lut_gen import soft_dec_table, calc_soft_dec_from_table, calc_soft_dec
from .psk_constellations import psk_4_0, psk_4_1, psk_4_2, psk_4_3, psk_4_4, psk_4_5, psk_4_6, psk_4_7, sd_psk_4_0, sd_psk_4_1, sd_psk_4_2, sd_psk_4_3, sd_psk_4_4, sd_psk_4_5, sd_psk_4_6, sd_psk_4_7
from .qam_constellations import qam_16_0, sd_qam_16_0
def test_qpsk(i, sample, prec):
qpsk_const_list = [psk_4_0, psk_4_1, psk_4_2, psk_4_3,
psk_4_4, psk_4_5, psk_4_6, psk_4_7]
qpsk_lut_gen_list = [sd_psk_4_0, sd_psk_4_1, sd_psk_4_2, sd_psk_4_3,
sd_psk_4_4, sd_psk_4_5, sd_psk_4_6, sd_psk_4_7]
constel, code = qpsk_const_list[i]()
qpsk_lut_gen = qpsk_lut_gen_list[i]
rot_sym = 1
side = 2
width = 2
c = digital.constellation_rect(constel, code, rot_sym,
side, side, width, width)
# Get max energy/symbol in constellation
constel = c.points()
Es = max([numpy.sqrt(constel_i.real**2 + constel_i.imag**2) for constel_i in constel])
#table = soft_dec_table_generator(qpsk_lut_gen, prec, Es)
table = soft_dec_table(constel, code, prec)
c.gen_soft_dec_lut(prec)
#c.set_soft_dec_lut(table, prec)
y_python_gen_calc = qpsk_lut_gen(sample, Es)
y_python_table = calc_soft_dec_from_table(sample, table, prec, Es)
y_python_raw_calc = calc_soft_dec(sample, constel, code)
y_cpp_table = c.soft_decision_maker(sample)
y_cpp_raw_calc = c.calc_soft_dec(sample)
return (y_python_gen_calc, y_python_table, y_python_raw_calc,
y_cpp_table, y_cpp_raw_calc, constel, code, c)
def test_qam16(i, sample, prec):
sample = sample / 1
qam_const_list = [qam_16_0, ]
qam_lut_gen_list = [sd_qam_16_0, ]
constel, code = qam_const_list[i]()
qam_lut_gen = qam_lut_gen_list[i]
rot_sym = 4
side = 2
width = 2
c = digital.constellation_rect(constel, code, rot_sym,
side, side, width, width)
# Get max energy/symbol in constellation
constel = c.points()
Es = max([abs(constel_i) for constel_i in constel])
#table = soft_dec_table_generator(qam_lut_gen, prec, Es)
table = soft_dec_table(constel, code, prec, 1)
#c.gen_soft_dec_lut(prec)
c.set_soft_dec_lut(table, prec)
y_python_gen_calc = qam_lut_gen(sample, Es)
y_python_table = calc_soft_dec_from_table(sample, table, prec, Es)
y_python_raw_calc = calc_soft_dec(sample, constel, code, 1)
y_cpp_table = c.soft_decision_maker(sample)
y_cpp_raw_calc = c.calc_soft_dec(sample)
return (y_python_gen_calc, y_python_table, y_python_raw_calc,
y_cpp_table, y_cpp_raw_calc, constel, code, c)
if __name__ == "__main__":
index = 0
prec = 8
x_re = 2*numpy.random.random()-1
x_im = 2*numpy.random.random()-1
x = x_re + x_im*1j
#x = -1 + -0.j
if 1:
y_python_gen_calc, y_python_table, y_python_raw_calc, \
y_cpp_table, y_cpp_raw_calc, constel, code, c \
= test_qpsk(index, x, prec)
else:
y_python_gen_calc, y_python_table, y_python_raw_calc, \
y_cpp_table, y_cpp_raw_calc, constel, code, c \
= test_qam16(index, x, prec)
k = numpy.log2(len(constel))
print("Sample: ", x)
print("Python Generator Calculated: ", (y_python_gen_calc))
print("Python Generator Table: ", (y_python_table))
print("Python Raw calc: ", (y_python_raw_calc))
print("C++ Table calc: ", (y_cpp_table))
print("C++ Raw calc: ", (y_cpp_raw_calc))
fig = pyplot.figure(1)
sp1 = fig.add_subplot(1,1,1)
sp1.plot([c.real for c in constel],
[c.imag for c in constel], 'bo')
sp1.plot(x.real, x.imag, 'ro')
sp1.set_xlim([-1.5, 1.5])
sp1.set_ylim([-1.5, 1.5])
fill = int(numpy.log2(len(constel)))
for i,c in enumerate(constel):
sp1.text(1.2*c.real, 1.2*c.imag, bin(code[i])[2:].zfill(fill),
ha='center', va='center', size=18)
pyplot.show()
|
gpl-3.0
|
mgeplf/NeuroM
|
examples/end_to_end_distance.py
|
5
|
4395
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Calculate and plot end-to-end distance of neurites.'''
import neurom as nm
from neurom import morphmath
import numpy as np
import matplotlib.pyplot as plt
def path_end_to_end_distance(neurite):
'''Calculate and return end-to-end-distance of a given neurite.'''
trunk = neurite.root_node.points[0]
return max(morphmath.point_dist(l.points[-1], trunk)
for l in neurite.root_node.ileaf())
def mean_end_to_end_dist(neurites):
'''Calculate mean end to end distance for set of neurites.'''
return np.mean([path_end_to_end_distance(n) for n in neurites])
def make_end_to_end_distance_plot(nb_segments, end_to_end_distance, neurite_type):
'''Plot end-to-end distance vs number of segments'''
plt.figure()
plt.plot(nb_segments, end_to_end_distance)
plt.title(neurite_type)
plt.xlabel('Number of segments')
plt.ylabel('End-to-end distance')
plt.show()
def calculate_and_plot_end_to_end_distance(neurite):
'''Calculate and plot the end-to-end distance vs the number of segments for
an increasingly larger part of a given neurite.
Note that the plots are not very meaningful for bifurcating trees.'''
def _dist(seg):
        '''Distance between segment end and trunk'''
return morphmath.point_dist(seg[1], neurite.root_node.points[0])
end_to_end_distance = [_dist(s) for s in nm.iter_segments(neurite)]
make_end_to_end_distance_plot(np.arange(len(end_to_end_distance)) + 1,
end_to_end_distance, neurite.type)
if __name__ == '__main__':
# load a neuron from an SWC file
filename = 'test_data/swc/Neuron_3_random_walker_branches.swc'
nrn = nm.load_neuron(filename)
# print mean end-to-end distance per neurite type
print('Mean end-to-end distance for axons: ',
mean_end_to_end_dist(n for n in nrn.neurites if n.type == nm.AXON))
print('Mean end-to-end distance for basal dendrites: ',
mean_end_to_end_dist(n for n in nrn.neurites if n.type == nm.BASAL_DENDRITE))
print('Mean end-to-end distance for apical dendrites: ',
mean_end_to_end_dist(n for n in nrn.neurites
if n.type == nm.APICAL_DENDRITE))
print('End-to-end distance per neurite (nb segments, end-to-end distance, neurite type):')
for nrte in nrn.neurites:
# plot end-to-end distance for increasingly larger parts of neurite
calculate_and_plot_end_to_end_distance(nrte)
# print (number of segments, end-to-end distance, neurite type)
print(sum(len(s.points) - 1 for s in nrte.root_node.ipreorder()),
path_end_to_end_distance(nrte), nrte.type)
|
bsd-3-clause
|
ishank08/scikit-learn
|
examples/gaussian_process/plot_gpc_isoprobability.py
|
64
|
3049
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
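# Illustrative check (added note, not in the original example): labels below come
# from the sign of g. For the hypothetical points (0, 0) and (0, 6):
#   g(np.array([[0., 0.], [0., 6.]])) -> array([ 5., -1.])
# so (0, 0) would be labeled 1 (g > 0) and (0, 6) labeled 0 (g <= 0).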
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = plt.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = plt.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label(r'${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$')
plt.clim(0, 1)
plt.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
plt.clabel(cs, fontsize=11)
plt.show()
|
bsd-3-clause
|
h-mayorquin/M2_complexity_thesis
|
Analysis/time_traces_simulation.py
|
1
|
4176
|
'''
Created on Jun 4, 2014
@author: ramon
'''
from functions import *
from analysis_functions import *
from plot_functions import plot_mutliplot_bilinear
import numpy as np
import cPickle
import matplotlib.pyplot as plt
import os
from time import localtime
####################
# Load the data
####################
folder = './data/'
cell_number = 8
cell = '_cell_' + str(cell_number)
quality = '_3000_21_'
stimuli_type = 'SparseNoise'
stimuli_type = 'DenseNoise'
file_format = '.pickle'
filename_vm = folder + 'vm' + cell + quality + stimuli_type + file_format
filename_images = folder + 'images'+ quality + stimuli_type + file_format
# Save things or not
save_figures = True # Save the figures if True
remove_axis = True # Remove axis if True
f = open(filename_vm,'rb')
vm = cPickle.load(f)
f = open(filename_images,'rb')
ims = cPickle.load(f)
# Scale and normalize
ims = ims / 100 #Scale
ims = ims - 0.5 # Center
ims2 = ims**2
Nside = ims.shape[2]
##########################
# Parameters of the data
##########################
#Scale and size values
dt = 1.0 # time sampling (ms)
dim = 21.0 # duration of the image (ms)
dh = 7.0 # resolution of the kernel (ms)
kernel_duration = 150 # ms
kernel_size = int(kernel_duration / dh)
# Scale factors
input_to_image = dt / dim # Transforms input to image
kernel_to_input = dh / dt # Transforms kernel to input
image_to_input = dim / dt # Transforms image to input
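# Worked example of these scale factors (added note, using the settings above):
# with dt = 1.0 ms and dim = 21.0 ms, input_to_image = 1/21, so input sample 42
# maps to image index floor(42 / 21) = 2; with dh = 7.0 ms, kernel_to_input = 7,
# so kernel tap k corresponds to a delay of 7 * k input samples (tap 3 -> 21 ms).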
## Input preprocessing
vm = downsample(vm,dt)
# Take the percentage of the total that is going to be used
percentage = 0.30
Ntotal = int(percentage * vm.size)
# Take the minimum between the maximum and the choice
Ntotal = np.min((Ntotal, vm.size))
V = vm[0:int(Ntotal)]
vm = None # Liberate memory
# Size of the training set as a percentage of the data
alpha = 0.95 # training vs total
Ntraining = int(alpha * Ntotal)
# Construct the set of indexes (training, test, working)
Ntest = 10000
remove_start = int(kernel_size * kernel_to_input) # Number of input samples spanned by a complete kernel
Ntest = np.min((Ntest, Ntotal - Ntraining)) # Take Ntest more examples to test, or the rest available
working_indexes = np.arange(Ntotal)
working_indexes = working_indexes.astype(int)
training_indexes = np.arange(remove_start, Ntraining)
test_indexes = np.arange(Ntraining,Ntraining + Ntest)
test_indexes = test_indexes.astype(int)
# Calculate kernel
kernel_times = np.arange(kernel_size)
kernel_times = kernel_times.astype(int) # Make the values indexes
# Delay indexes
delay_indexes = np.floor(kernel_times * kernel_to_input)
delay_indexes = delay_indexes.astype(int)
# Image Indexes
image_indexes = np.zeros(working_indexes.size)
image_indexes[working_indexes] = np.floor(working_indexes * input_to_image)
image_indexes = image_indexes.astype(int)
# Center and normalize the output
mean = np.mean(V[training_indexes])
V = V - mean
#std = np.std(V)
#V = V / std
#V = V / (np.max(V) - np.min(V))
#V = V / (np.max(np.abs(V)))
###############################
# Time traces
###############################
# Plot a size x size grid (4 x 4 here) with the positive and negative traces
time_window = 200 # In ms
left_corner_x = 3
left_corner_y = 3
size = 4
count = 0
for x in xrange(left_corner_x, left_corner_x + size):
for y in xrange(left_corner_y, left_corner_y + size):
count += 1
positive = positive_time_trace(x,y, time_window, image_to_input, V, ims)
negative = negative_time_trace(x,y, time_window, image_to_input, V, ims)
plt.subplot(size, size, count)
plt.plot(positive)
plt.hold(True)
plt.plot(negative)
directory = './figures/'
formatting = '.pdf'
title = 'time_traces_' + stimuli_type + cell + quality
save_filename = directory + title + formatting
figure = plt.gcf() # get current figure
if remove_axis:
# Remove axis
for i in xrange(size ** 2 ):
figure.get_axes()[i].get_xaxis().set_visible(False)
figure.get_axes()[i].get_yaxis().set_visible(False)
if save_figures:
figure.set_size_inches(16, 12)
plt.savefig(save_filename, dpi = 100)
os.system("pdfcrop %s %s" % (save_filename, save_filename))
plt.show()
|
bsd-2-clause
|
Merinorus/adaisawesome
|
Homework/03 - Interactive Viz/Mapping onto Switzerland.py
|
1
|
1723
|
# coding: utf-8
# In this part of the exercise, we need to put the data we have gathered about the funding levels of universities in the different cantons onto a canton map. We will do so using Folium, starting from the example TopoJSON mapping it provides.
# In[15]:
import folium
import pandas as pd
# In[1]:
# Test seeing Switzerland
ch_map = folium.Map(location=[47.3769, 8.5417], tiles='Stamen Toner',
zoom_start=13)
ch_map.save('stamen_toner.html')
ch_map
# Now do the TopoJSON overlay
# In[61]:
# Import the Switzerland map (from the folio pylib notebook)
topo_path = r'ch-cantons.topojson.json'
# Import our csv file with all of the values for the amounts of the grants
data = 'P3_GrantExport.csv'
# Insert coordinates that are for Switzerland (i.e. 9.594226,47.525058)
ch_map = folium.Map(location=[46.8769, 8.6017], tiles='Mapbox Bright',
zoom_start=7)
ch_map.choropleth(geo_path=topo_path, topojson='objects.ch-cantons')
ch_map.save('ch_map.html')
ch_map
# In[ ]:
# Need to use colors wisely - because this is continuous and not discrete, we will be using different shades of green
# In[ ]:
#Catarina's test
import folium
import pandas as pd
topo_path = r'ch-cantons.topojson.json'
grants_data = pd.read_csv('P3_GrantExport.csv')
#Let Folium determine the scale
map = folium.Map(location=[47.3769, 8.5417], zoom_start=13)
map.choropleth(geo_path=topo_path, data=grants_data,
columns=['Canton Shortname', 'Total Sum'],
key_on='feature.id',
fill_color='YlGn', fill_opacity=0.7, line_opacity=0.2,
legend_name='Total Grants Received (CHF)')
map.save('swiss_grants.html')
|
gpl-3.0
|
anurag03/integration_tests
|
cfme/utils/smem_memory_monitor.py
|
2
|
67384
|
"""Monitor Memory on a CFME/Miq appliance and builds report&graphs displaying usage per process."""
import json
import time
import traceback
from collections import OrderedDict
from datetime import datetime
from threading import Thread
import os
import six
import yaml
from yaycl import AttrDict
from cfme.utils.conf import cfme_performance
from cfme.utils.log import logger
from cfme.utils.path import results_path
from cfme.utils.version import current_version
from cfme.utils.version import get_version
miq_workers = [
'MiqGenericWorker',
'MiqPriorityWorker',
'MiqScheduleWorker',
'MiqUiWorker',
'MiqWebServiceWorker',
'MiqWebsocketWorker',
'MiqReportingWorker',
'MiqReplicationWorker',
'MiqSmartProxyWorker',
'MiqVimBrokerWorker',
'MiqEmsRefreshCoreWorker',
# Refresh Workers:
'ManageIQ::Providers::Microsoft::InfraManager::RefreshWorker',
'ManageIQ::Providers::Openstack::InfraManager::RefreshWorker',
'ManageIQ::Providers::Redhat::InfraManager::RefreshWorker',
'ManageIQ::Providers::Vmware::InfraManager::RefreshWorker',
'MiqEmsRefreshWorkerMicrosoft', # 5.4
'MiqEmsRefreshWorkerRedhat', # 5.4
'MiqEmsRefreshWorkerVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::RefreshWorker',
'ManageIQ::Providers::Azure::CloudManager::RefreshWorker',
'ManageIQ::Providers::Google::CloudManager::RefreshWorker',
'ManageIQ::Providers::Openstack::CloudManager::RefreshWorker',
'MiqEmsRefreshWorkerAmazon', # 5.4
'MiqEmsRefreshWorkerOpenstack', # 5.4
'ManageIQ::Providers::AnsibleTower::ConfigurationManager::RefreshWorker',
'ManageIQ::Providers::Foreman::ConfigurationManager::RefreshWorker',
'ManageIQ::Providers::Foreman::ProvisioningManager::RefreshWorker',
'MiqEmsRefreshWorkerForemanConfiguration', # 5.4
'MiqEmsRefreshWorkerForemanProvisioning', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::RefreshWorker',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::RefreshWorker',
'ManageIQ::Providers::Kubernetes::ContainerManager::RefreshWorker',
'ManageIQ::Providers::Openshift::ContainerManager::RefreshWorker',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::RefreshWorker',
'ManageIQ::Providers::StorageManager::CinderManager::RefreshWorker',
'ManageIQ::Providers::StorageManager::SwiftManager::RefreshWorker',
'ManageIQ::Providers::Amazon::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Azure::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Google::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Openstack::NetworkManager::RefreshWorker',
'MiqNetappRefreshWorker',
'MiqSmisRefreshWorker',
# Event Workers:
'MiqEventHandler',
'ManageIQ::Providers::Openstack::InfraManager::EventCatcher',
'ManageIQ::Providers::StorageManager::CinderManager::EventCatcher',
'ManageIQ::Providers::Redhat::InfraManager::EventCatcher',
'ManageIQ::Providers::Vmware::InfraManager::EventCatcher',
'MiqEventCatcherRedhat', # 5.4
'MiqEventCatcherVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::EventCatcher',
'ManageIQ::Providers::Azure::CloudManager::EventCatcher',
'ManageIQ::Providers::Google::CloudManager::EventCatcher',
'ManageIQ::Providers::Openstack::CloudManager::EventCatcher',
'MiqEventCatcherAmazon', # 5.4
'MiqEventCatcherOpenstack', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::EventCatcher',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::EventCatcher',
'ManageIQ::Providers::Kubernetes::ContainerManager::EventCatcher',
'ManageIQ::Providers::Openshift::ContainerManager::EventCatcher',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::EventCatcher',
'ManageIQ::Providers::Openstack::NetworkManager::EventCatcher',
# Metrics Processor/Collector Workers
'MiqEmsMetricsProcessorWorker',
'ManageIQ::Providers::Openstack::InfraManager::MetricsCollectorWorker',
'ManageIQ::Providers::Redhat::InfraManager::MetricsCollectorWorker',
'ManageIQ::Providers::Vmware::InfraManager::MetricsCollectorWorker',
'MiqEmsMetricsCollectorWorkerRedhat', # 5.4
'MiqEmsMetricsCollectorWorkerVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::MetricsCollectorWorker',
'ManageIQ::Providers::Azure::CloudManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openstack::CloudManager::MetricsCollectorWorker',
'MiqEmsMetricsCollectorWorkerAmazon', # 5.4
'MiqEmsMetricsCollectorWorkerOpenstack', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Kubernetes::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openshift::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openstack::NetworkManager::MetricsCollectorWorker',
'MiqStorageMetricsCollectorWorker',
'MiqVmdbStorageBridgeWorker']
ruby_processes = list(miq_workers)
ruby_processes.extend(['evm:dbsync:replicate', 'MIQ Server (evm_server.rb)', 'evm_watchdog.rb',
'appliance_console.rb'])
process_order = list(ruby_processes)
process_order.extend(['memcached', 'postgres', 'httpd', 'collectd'])
# Timestamp created at first import, thus grouping all reports of like workload
test_ts = time.strftime('%Y%m%d%H%M%S')
# 10s sample interval (occasionally sampling can take almost 4s on an appliance doing a lot of work)
SAMPLE_INTERVAL = 10
class SmemMemoryMonitor(Thread):
def __init__(self, ssh_client, scenario_data):
super(SmemMemoryMonitor, self).__init__()
self.ssh_client = ssh_client
self.scenario_data = scenario_data
self.grafana_urls = {}
self.miq_server_id = ''
self.use_slab = False
self.signal = True
def create_process_result(self, process_results, starttime, process_pid, process_name,
memory_by_pid):
if process_pid in memory_by_pid.keys():
if process_name not in process_results:
process_results[process_name] = OrderedDict()
process_results[process_name][process_pid] = OrderedDict()
if process_pid not in process_results[process_name]:
process_results[process_name][process_pid] = OrderedDict()
process_results[process_name][process_pid][starttime] = {}
rss_mem = memory_by_pid[process_pid]['rss']
pss_mem = memory_by_pid[process_pid]['pss']
uss_mem = memory_by_pid[process_pid]['uss']
vss_mem = memory_by_pid[process_pid]['vss']
swap_mem = memory_by_pid[process_pid]['swap']
process_results[process_name][process_pid][starttime]['rss'] = rss_mem
process_results[process_name][process_pid][starttime]['pss'] = pss_mem
process_results[process_name][process_pid][starttime]['uss'] = uss_mem
process_results[process_name][process_pid][starttime]['vss'] = vss_mem
process_results[process_name][process_pid][starttime]['swap'] = swap_mem
del memory_by_pid[process_pid]
else:
            logger.warn('Process {} PID not found: {}'.format(process_name, process_pid))
def get_appliance_memory(self, appliance_results, plottime):
# 5.5/5.6 - RHEL 7 / Centos 7
# Application Memory Used : MemTotal - (MemFree + Slab + Cached)
# 5.4 - RHEL 6 / Centos 6
# Application Memory Used : MemTotal - (MemFree + Buffers + Cached)
# Available memory could potentially be better metric
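        # Worked example (added note, hypothetical /proc/meminfo values in kB):
        # MemTotal=8000000, MemFree=2000000, Slab=500000, Cached=1500000 gives
        # used = (8000000 - (2000000 + 500000 + 1500000)) / 1024 ~= 3906.25 MiB;
        # all values stored below are converted from kB to MiB the same way.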
appliance_results[plottime] = {}
result = self.ssh_client.run_command('cat /proc/meminfo')
if result.failed:
logger.error('Exit_status nonzero in get_appliance_memory: {}, {}'
.format(result.rc, result.output))
del appliance_results[plottime]
else:
meminfo_raw = result.output.replace('kB', '').strip()
meminfo = OrderedDict((k.strip(), v.strip()) for k, v in
(value.strip().split(':') for value in meminfo_raw.split('\n')))
appliance_results[plottime]['total'] = float(meminfo['MemTotal']) / 1024
appliance_results[plottime]['free'] = float(meminfo['MemFree']) / 1024
if 'MemAvailable' in meminfo: # 5.5, RHEL 7/Centos 7
self.use_slab = True
mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
meminfo['Slab']) + float(meminfo['Cached']))) / 1024
else: # 5.4, RHEL 6/Centos 6
mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
meminfo['Buffers']) + float(meminfo['Cached']))) / 1024
appliance_results[plottime]['used'] = mem_used
appliance_results[plottime]['buffers'] = float(meminfo['Buffers']) / 1024
appliance_results[plottime]['cached'] = float(meminfo['Cached']) / 1024
appliance_results[plottime]['slab'] = float(meminfo['Slab']) / 1024
appliance_results[plottime]['swap_total'] = float(meminfo['SwapTotal']) / 1024
appliance_results[plottime]['swap_free'] = float(meminfo['SwapFree']) / 1024
def get_evm_workers(self):
result = self.ssh_client.run_command(
'psql -t -q -d vmdb_production -c '
'\"select pid,type from miq_workers where miq_server_id = \'{}\'\"'.format(
self.miq_server_id))
if result.output.strip():
workers = {}
for worker in result.output.strip().split('\n'):
pid_worker = worker.strip().split('|')
if len(pid_worker) == 2:
workers[pid_worker[0].strip()] = pid_worker[1].strip()
else:
logger.error('Unexpected output from psql: {}'.format(worker))
return workers
else:
return {}
# Old method of obtaining per process memory (Appliances without smem)
# def get_pids_memory(self):
# result = self.ssh_client.run_command(
# 'ps -A -o pid,rss,vsz,comm,cmd | sed 1d')
# pids_memory = result.output.strip().split('\n')
# memory_by_pid = {}
# for line in pids_memory:
# values = [s for s in line.strip().split(' ') if s]
# pid = values[0]
# memory_by_pid[pid] = {}
# memory_by_pid[pid]['rss'] = float(values[1]) / 1024
# memory_by_pid[pid]['vss'] = float(values[2]) / 1024
# memory_by_pid[pid]['name'] = values[3]
# memory_by_pid[pid]['cmd'] = ' '.join(values[4:])
# return memory_by_pid
def get_miq_server_id(self):
# Obtain the Miq Server GUID:
result = self.ssh_client.run_command('cat /var/www/miq/vmdb/GUID')
logger.info('Obtained appliance GUID: {}'.format(result.output.strip()))
# Get server id:
result = self.ssh_client.run_command(
'psql -t -q -d vmdb_production -c "select id from miq_servers where guid = \'{}\'"'
''.format(result.output.strip()))
logger.info('Obtained miq_server_id: {}'.format(result.output.strip()))
self.miq_server_id = result.output.strip()
def get_pids_memory(self):
result = self.ssh_client.run_command(
'smem -c \'pid rss pss uss vss swap name command\' | sed 1d')
pids_memory = result.output.strip().split('\n')
memory_by_pid = {}
for line in pids_memory:
if line.strip():
try:
values = [s for s in line.strip().split(' ') if s]
pid = values[0]
int(pid)
memory_by_pid[pid] = {}
memory_by_pid[pid]['rss'] = float(values[1]) / 1024
memory_by_pid[pid]['pss'] = float(values[2]) / 1024
memory_by_pid[pid]['uss'] = float(values[3]) / 1024
memory_by_pid[pid]['vss'] = float(values[4]) / 1024
memory_by_pid[pid]['swap'] = float(values[5]) / 1024
memory_by_pid[pid]['name'] = values[6]
memory_by_pid[pid]['cmd'] = ' '.join(values[7:])
except Exception as e:
                    logger.error('Processing smem output error: {}: {}'.format(e.__class__.__name__, e))
logger.error('Issue with pid: {} line: {}'.format(pid, line))
logger.error('Complete smem output: {}'.format(result.output))
return memory_by_pid
def _real_run(self):
""" Result dictionaries:
appliance_results[timestamp][measurement] = value
appliance_results[timestamp]['total'] = value
appliance_results[timestamp]['free'] = value
appliance_results[timestamp]['used'] = value
appliance_results[timestamp]['buffers'] = value
appliance_results[timestamp]['cached'] = value
appliance_results[timestamp]['slab'] = value
appliance_results[timestamp]['swap_total'] = value
appliance_results[timestamp]['swap_free'] = value
appliance measurements: total/free/used/buffers/cached/slab/swap_total/swap_free
process_results[name][pid][timestamp][measurement] = value
process_results[name][pid][timestamp]['rss'] = value
process_results[name][pid][timestamp]['pss'] = value
process_results[name][pid][timestamp]['uss'] = value
process_results[name][pid][timestamp]['vss'] = value
process_results[name][pid][timestamp]['swap'] = value
"""
appliance_results = OrderedDict()
process_results = OrderedDict()
install_smem(self.ssh_client)
self.get_miq_server_id()
logger.info('Starting Monitoring Thread.')
while self.signal:
starttime = time.time()
plottime = datetime.now()
self.get_appliance_memory(appliance_results, plottime)
workers = self.get_evm_workers()
memory_by_pid = self.get_pids_memory()
for worker_pid in workers:
self.create_process_result(process_results, plottime, worker_pid,
workers[worker_pid], memory_by_pid)
for pid in sorted(memory_by_pid.keys()):
if memory_by_pid[pid]['name'] == 'httpd':
self.create_process_result(process_results, plottime, pid, 'httpd',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'postgres':
self.create_process_result(process_results, plottime, pid, 'postgres',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'postmaster':
self.create_process_result(process_results, plottime, pid, 'postgres',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'memcached':
self.create_process_result(process_results, plottime, pid, 'memcached',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'collectd':
self.create_process_result(process_results, plottime, pid, 'collectd',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'ruby':
if 'evm_server.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'MIQ Server (evm_server.rb)', memory_by_pid)
elif 'MIQ Server' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'MIQ Server (evm_server.rb)', memory_by_pid)
elif 'evm_watchdog.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'evm_watchdog.rb', memory_by_pid)
elif 'appliance_console.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'appliance_console.rb', memory_by_pid)
elif 'evm:dbsync:replicate' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'evm:dbsync:replicate', memory_by_pid)
else:
logger.debug('Unaccounted for ruby pid: {}'.format(pid))
timediff = time.time() - starttime
logger.debug('Monitoring sampled in {}s'.format(round(timediff, 4)))
# Sleep Monitoring interval
# Roughly 10s samples, accounts for collection of memory measurements
            time_to_sleep = max(SAMPLE_INTERVAL - timediff, 0)
time.sleep(time_to_sleep)
logger.info('Monitoring CFME Memory Terminating')
create_report(self.scenario_data, appliance_results, process_results, self.use_slab,
self.grafana_urls)
def run(self):
try:
self._real_run()
except Exception as e:
logger.error('Error in Monitoring Thread: {}'.format(e))
logger.error('{}'.format(traceback.format_exc()))
def install_smem(ssh_client):
# smem is included by default in 5.6 appliances
logger.info('Installing smem.')
ver = get_version()
if ver == '55':
ssh_client.run_command('rpm -i {}'.format(cfme_performance['tools']['rpms']['epel7_rpm']))
ssh_client.run_command('yum install -y smem')
# Patch smem to display longer command line names
logger.info('Patching smem')
ssh_client.run_command('sed -i s/\.27s/\.200s/g /usr/bin/smem')
def create_report(scenario_data, appliance_results, process_results, use_slab, grafana_urls):
logger.info('Creating Memory Monitoring Report.')
ver = current_version()
provider_names = 'No Providers'
if 'providers' in scenario_data['scenario']:
provider_names = ', '.join(scenario_data['scenario']['providers'])
workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
if not os.path.exists(str(workload_path)):
os.makedirs(str(workload_path))
scenario_path = workload_path.join(scenario_data['scenario']['name'])
if os.path.exists(str(scenario_path)):
logger.warn('Duplicate Workload-Scenario Name: {}'.format(scenario_path))
scenario_path = workload_path.join('{}-{}'.format(time.strftime('%Y%m%d%H%M%S'),
scenario_data['scenario']['name']))
logger.warn('Using: {}'.format(scenario_path))
os.mkdir(str(scenario_path))
mem_graphs_path = scenario_path.join('graphs')
if not os.path.exists(str(mem_graphs_path)):
os.mkdir(str(mem_graphs_path))
mem_rawdata_path = scenario_path.join('rawdata')
if not os.path.exists(str(mem_rawdata_path)):
os.mkdir(str(mem_rawdata_path))
graph_appliance_measurements(mem_graphs_path, ver, appliance_results, use_slab, provider_names)
graph_individual_process_measurements(mem_graphs_path, process_results, provider_names)
graph_same_miq_workers(mem_graphs_path, process_results, provider_names)
graph_all_miq_workers(mem_graphs_path, process_results, provider_names)
# Dump scenario Yaml:
with open(str(scenario_path.join('scenario.yml')), 'w') as scenario_file:
yaml.dump(dict(scenario_data['scenario']), scenario_file, default_flow_style=False)
generate_summary_csv(scenario_path.join('{}-summary.csv'.format(ver)), appliance_results,
process_results, provider_names, ver)
generate_raw_data_csv(mem_rawdata_path, appliance_results, process_results)
generate_summary_html(scenario_path, ver, appliance_results, process_results, scenario_data,
provider_names, grafana_urls)
generate_workload_html(scenario_path, ver, scenario_data, provider_names, grafana_urls)
logger.info('Finished Creating Report')
def compile_per_process_results(procs_to_compile, process_results, ts_end):
alive_pids = 0
recycled_pids = 0
total_running_rss = 0
total_running_pss = 0
total_running_uss = 0
total_running_vss = 0
total_running_swap = 0
for process in procs_to_compile:
if process in process_results:
for pid in process_results[process]:
if ts_end in process_results[process][pid]:
alive_pids += 1
total_running_rss += process_results[process][pid][ts_end]['rss']
total_running_pss += process_results[process][pid][ts_end]['pss']
total_running_uss += process_results[process][pid][ts_end]['uss']
total_running_vss += process_results[process][pid][ts_end]['vss']
total_running_swap += process_results[process][pid][ts_end]['swap']
else:
recycled_pids += 1
return alive_pids, recycled_pids, total_running_rss, total_running_pss, total_running_uss, \
total_running_vss, total_running_swap
def generate_raw_data_csv(directory, appliance_results, process_results):
starttime = time.time()
file_name = str(directory.join('appliance.csv'))
with open(file_name, 'w') as csv_file:
csv_file.write('TimeStamp,Total,Free,Used,Buffers,Cached,Slab,Swap_Total,Swap_Free\n')
for ts in appliance_results:
csv_file.write('{},{},{},{},{},{},{},{},{}\n'.format(ts,
appliance_results[ts]['total'], appliance_results[ts]['free'],
appliance_results[ts]['used'], appliance_results[ts]['buffers'],
appliance_results[ts]['cached'], appliance_results[ts]['slab'],
appliance_results[ts]['swap_total'], appliance_results[ts]['swap_free']))
for process_name in process_results:
for process_pid in process_results[process_name]:
file_name = str(directory.join('{}-{}.csv'.format(process_pid, process_name)))
with open(file_name, 'w') as csv_file:
csv_file.write('TimeStamp,RSS,PSS,USS,VSS,SWAP\n')
for ts in process_results[process_name][process_pid]:
csv_file.write('{},{},{},{},{},{}\n'.format(ts,
process_results[process_name][process_pid][ts]['rss'],
process_results[process_name][process_pid][ts]['pss'],
process_results[process_name][process_pid][ts]['uss'],
process_results[process_name][process_pid][ts]['vss'],
process_results[process_name][process_pid][ts]['swap']))
timediff = time.time() - starttime
logger.info('Generated Raw Data CSVs in: {}'.format(timediff))
def generate_summary_csv(file_name, appliance_results, process_results, provider_names,
version_string):
starttime = time.time()
with open(str(file_name), 'w') as csv_file:
csv_file.write('Version: {}, Provider(s): {}\n'.format(version_string, provider_names))
csv_file.write('Measurement,Start of test,End of test\n')
start = appliance_results.keys()[0]
end = appliance_results.keys()[-1]
csv_file.write('Appliance Total Memory,{},{}\n'.format(
round(appliance_results[start]['total'], 2), round(appliance_results[end]['total'], 2)))
csv_file.write('Appliance Free Memory,{},{}\n'.format(
round(appliance_results[start]['free'], 2), round(appliance_results[end]['free'], 2)))
csv_file.write('Appliance Used Memory,{},{}\n'.format(
round(appliance_results[start]['used'], 2), round(appliance_results[end]['used'], 2)))
csv_file.write('Appliance Buffers,{},{}\n'.format(
round(appliance_results[start]['buffers'], 2),
round(appliance_results[end]['buffers'], 2)))
csv_file.write('Appliance Cached,{},{}\n'.format(
round(appliance_results[start]['cached'], 2),
round(appliance_results[end]['cached'], 2)))
csv_file.write('Appliance Slab,{},{}\n'.format(
round(appliance_results[start]['slab'], 2),
round(appliance_results[end]['slab'], 2)))
csv_file.write('Appliance Total Swap,{},{}\n'.format(
round(appliance_results[start]['swap_total'], 2),
round(appliance_results[end]['swap_total'], 2)))
csv_file.write('Appliance Free Swap,{},{}\n'.format(
round(appliance_results[start]['swap_free'], 2),
round(appliance_results[end]['swap_free'], 2)))
summary_csv_measurement_dump(csv_file, process_results, 'rss')
summary_csv_measurement_dump(csv_file, process_results, 'pss')
summary_csv_measurement_dump(csv_file, process_results, 'uss')
summary_csv_measurement_dump(csv_file, process_results, 'vss')
summary_csv_measurement_dump(csv_file, process_results, 'swap')
timediff = time.time() - starttime
logger.info('Generated Summary CSV in: {}'.format(timediff))
def generate_summary_html(directory, version_string, appliance_results, process_results,
scenario_data, provider_names, grafana_urls):
starttime = time.time()
file_name = str(directory.join('index.html'))
with open(file_name, 'w') as html_file:
html_file.write('<html>\n')
html_file.write('<head><title>{} - {} Memory Usage Performance</title></head>'.format(
version_string, provider_names))
html_file.write('<body>\n')
html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(version_string,
scenario_data['test_name'].title()))
html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
scenario_data['appliance_roles'].replace(',', ', ')))
html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
scenario_data['appliance_ip'], scenario_data['appliance_name']))
if grafana_urls:
for g_name in sorted(grafana_urls.keys()):
html_file.write(
' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
g_name))
html_file.write('<br>\n')
html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(version_string))
html_file.write(' : <b><a href=\'workload.html\'>Workload Info</a></b>')
html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
start = appliance_results.keys()[0]
end = appliance_results.keys()[-1]
timediff = end - start
total_proc_count = 0
for proc_name in process_results:
total_proc_count += len(process_results[proc_name].keys())
growth = appliance_results[end]['used'] - appliance_results[start]['used']
max_used_memory = 0
for ts in appliance_results:
if appliance_results[ts]['used'] > max_used_memory:
max_used_memory = appliance_results[ts]['used']
html_file.write('<table border="1">\n')
html_file.write('<tr><td>\n')
# Appliance Wide Results
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Version</b></td>\n')
html_file.write('<td><b>Start Time</b></td>\n')
html_file.write('<td><b>End Time</b></td>\n')
html_file.write('<td><b>Total Test Time</b></td>\n')
html_file.write('<td><b>Total Memory</b></td>\n')
html_file.write('<td><b>Start Used Memory</b></td>\n')
html_file.write('<td><b>End Used Memory</b></td>\n')
html_file.write('<td><b>Used Memory Growth</b></td>\n')
html_file.write('<td><b>Max Used Memory</b></td>\n')
html_file.write('<td><b>Total Tracked Processes</b></td>\n')
html_file.write('</tr>\n')
html_file.write('<td><a href=\'rawdata/appliance.csv\'>{}</a></td>\n'.format(
version_string))
html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(unicode(timediff).partition('.')[0]))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['total'], 2)))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[start]['used'], 2)))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['used'], 2)))
html_file.write('<td>{}</td>\n'.format(round(growth, 2)))
html_file.write('<td>{}</td>\n'.format(round(max_used_memory, 2)))
html_file.write('<td>{}</td>\n'.format(total_proc_count))
html_file.write('</table>\n')
# CFME/Miq Worker Results
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Total CFME/Miq Workers</b></td>\n')
html_file.write('<td><b>End Running Workers</b></td>\n')
html_file.write('<td><b>Recycled Workers</b></td>\n')
html_file.write('<td><b>End Total Worker RSS</b></td>\n')
html_file.write('<td><b>End Total Worker PSS</b></td>\n')
html_file.write('<td><b>End Total Worker USS</b></td>\n')
html_file.write('<td><b>End Total Worker VSS</b></td>\n')
html_file.write('<td><b>End Total Worker SWAP</b></td>\n')
html_file.write('</tr>\n')
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
miq_workers, process_results, end)
html_file.write('<tr>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
html_file.write('</table>\n')
# Per Process Summaries:
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Application/Process Group</b></td>\n')
html_file.write('<td><b>Total Processes</b></td>\n')
html_file.write('<td><b>End Running Processes</b></td>\n')
html_file.write('<td><b>Recycled Processes</b></td>\n')
html_file.write('<td><b>End Total Process RSS</b></td>\n')
html_file.write('<td><b>End Total Process PSS</b></td>\n')
html_file.write('<td><b>End Total Process USS</b></td>\n')
html_file.write('<td><b>End Total Process VSS</b></td>\n')
html_file.write('<td><b>End Total Process SWAP</b></td>\n')
html_file.write('</tr>\n')
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
ruby_processes, process_results, end)
t_a_pids = a_pids
t_r_pids = r_pids
tt_rss = t_rss
tt_pss = t_pss
tt_uss = t_uss
tt_vss = t_vss
tt_swap = t_swap
html_file.write('<tr>\n')
html_file.write('<td>ruby</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# memcached Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['memcached'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>memcached</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# Postgres Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['postgres'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>postgres</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# httpd Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(['httpd'],
process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>httpd</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# collectd Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['collectd'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>collectd</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>total</td>\n')
html_file.write('<td>{}</td>\n'.format(t_a_pids + t_r_pids))
html_file.write('<td>{}</td>\n'.format(t_a_pids))
html_file.write('<td>{}</td>\n'.format(t_r_pids))
html_file.write('<td>{}</td>\n'.format(round(tt_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_swap, 2)))
html_file.write('</tr>\n')
html_file.write('</table>\n')
# Appliance Graph
html_file.write('</td></tr><tr><td>\n')
file_name = '{}-appliance_memory.png'.format(version_string)
html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
file_name = '{}-appliance_swap.png'.format(version_string)
# Check for swap usage through out time frame:
max_swap_used = 0
for ts in appliance_results:
swap_used = appliance_results[ts]['swap_total'] - appliance_results[ts]['swap_free']
if swap_used > max_swap_used:
max_swap_used = swap_used
if max_swap_used < 10: # Less than 10MiB Max, then hide graph
html_file.write('<br><a href=\'graphs/{}\'>Swap Graph '.format(file_name))
html_file.write('(Hidden, max_swap_used < 10 MiB)</a>\n')
else:
html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
html_file.write('</td></tr><tr><td>\n')
# Per Process Results
html_file.write('<table style="width:100%" border="1"><tr>\n')
html_file.write('<td><b>Process Name</b></td>\n')
html_file.write('<td><b>Process Pid</b></td>\n')
html_file.write('<td><b>Start Time</b></td>\n')
html_file.write('<td><b>End Time</b></td>\n')
html_file.write('<td><b>Time Alive</b></td>\n')
html_file.write('<td><b>RSS Mem Start</b></td>\n')
html_file.write('<td><b>RSS Mem End</b></td>\n')
html_file.write('<td><b>RSS Mem Change</b></td>\n')
html_file.write('<td><b>PSS Mem Start</b></td>\n')
html_file.write('<td><b>PSS Mem End</b></td>\n')
html_file.write('<td><b>PSS Mem Change</b></td>\n')
html_file.write('<td><b>CSV</b></td>\n')
html_file.write('</tr>\n')
# By Worker Type Memory Used
for ordered_name in process_order:
if ordered_name in process_results:
for pid in process_results[ordered_name]:
start = process_results[ordered_name][pid].keys()[0]
end = process_results[ordered_name][pid].keys()[-1]
timediff = end - start
html_file.write('<tr>\n')
if len(process_results[ordered_name]) > 1:
html_file.write('<td><a href=\'#{}\'>{}</a></td>\n'.format(ordered_name,
ordered_name))
html_file.write('<td><a href=\'graphs/{}-{}.png\'>{}</a></td>\n'.format(
ordered_name, pid, pid))
else:
html_file.write('<td>{}</td>\n'.format(ordered_name))
html_file.write('<td><a href=\'#{}-{}.png\'>{}</a></td>\n'.format(
ordered_name, pid, pid))
html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(unicode(timediff).partition('.')[0]))
rss_change = process_results[ordered_name][pid][end]['rss'] - \
process_results[ordered_name][pid][start]['rss']
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][start]['rss'], 2)))
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][end]['rss'], 2)))
html_file.write('<td>{}</td>\n'.format(round(rss_change, 2)))
pss_change = process_results[ordered_name][pid][end]['pss'] - \
process_results[ordered_name][pid][start]['pss']
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][start]['pss'], 2)))
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][end]['pss'], 2)))
html_file.write('<td>{}</td>\n'.format(round(pss_change, 2)))
html_file.write('<td><a href=\'rawdata/{}-{}.csv\'>csv</a></td>\n'.format(
pid, ordered_name))
html_file.write('</tr>\n')
else:
logger.debug('Process/Worker not part of test: {}'.format(ordered_name))
html_file.write('</table>\n')
# Worker Graphs
for ordered_name in process_order:
if ordered_name in process_results:
html_file.write('<tr><td>\n')
html_file.write('<div id=\'{}\'>Process name: {}</div><br>\n'.format(
ordered_name, ordered_name))
if len(process_results[ordered_name]) > 1:
file_name = '{}-all.png'.format(ordered_name)
html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(file_name,
file_name))
else:
for pid in sorted(process_results[ordered_name]):
file_name = '{}-{}.png'.format(ordered_name, pid)
html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(
file_name, file_name))
html_file.write('</td></tr>\n')
html_file.write('</table>\n')
html_file.write('</body>\n')
html_file.write('</html>\n')
timediff = time.time() - starttime
logger.info('Generated Summary html in: {}'.format(timediff))
def generate_workload_html(directory, ver, scenario_data, provider_names, grafana_urls):
starttime = time.time()
file_name = str(directory.join('workload.html'))
with open(file_name, 'w') as html_file:
html_file.write('<html>\n')
html_file.write('<head><title>{} - {}</title></head>'.format(
scenario_data['test_name'], provider_names))
html_file.write('<body>\n')
html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(ver,
scenario_data['test_name'].title()))
html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
scenario_data['appliance_roles'].replace(',', ', ')))
html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
scenario_data['appliance_ip'], scenario_data['appliance_name']))
if grafana_urls:
for g_name in sorted(grafana_urls.keys()):
html_file.write(
' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
g_name))
html_file.write('<br>\n')
html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(ver))
html_file.write(' : <b><a href=\'index.html\'>Memory Info</a></b>')
html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
html_file.write('<br><b>Scenario Data: </b><br>\n')
yaml_html = get_scenario_html(scenario_data['scenario'])
html_file.write(yaml_html + '\n')
html_file.write('<br>\n<br>\n<br>\n<b>Quantifier Data: </b>\n<br>\n<br>\n<br>\n<br>\n')
html_file.write('<table border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> System Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
system_path = ('../version_info/system.csv')
html_file.write('<a href="{}" download="System_Versions-{}-{}"> System Versions</a>'
.format(system_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> Process Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
process_path = ('../version_info/processes.csv')
html_file.write('<a href="{}" download="Process_Versions-{}-{}"> Process Versions</a>'
.format(process_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> Ruby Gem Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
gems_path = ('../version_info/gems.csv')
html_file.write('<a href="{}" download="Gem_Versions-{}-{}"> Ruby Gem Versions</a>'
.format(gems_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> RPM Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
rpms_path = ('../version_info/rpms.csv')
html_file.write('<a href="{}" download="RPM_Versions-{}-{}"> RPM Versions</a>'
.format(rpms_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('</table>\n')
html_file.write('</body>\n')
html_file.write('</html>\n')
timediff = time.time() - starttime
logger.info('Generated Workload html in: {}'.format(timediff))
def add_workload_quantifiers(quantifiers, scenario_data):
starttime = time.time()
ver = current_version()
workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
directory = workload_path.join(scenario_data['scenario']['name'])
file_name = str(directory.join('workload.html'))
marker = '<b>Quantifier Data: </b>'
yaml_dict = quantifiers
yaml_string = str(json.dumps(yaml_dict, indent=4))
yaml_html = yaml_string.replace('\n', '<br>\n')
with open(file_name, 'r+') as html_file:
line = ''
while marker not in line:
line = html_file.readline()
marker_pos = html_file.tell()
remainder = html_file.read()
html_file.seek(marker_pos)
html_file.write('{} \n'.format(yaml_html))
html_file.write(remainder)
timediff = time.time() - starttime
logger.info('Added quantifiers in: {}'.format(timediff))
def get_scenario_html(scenario_data):
scenario_dict = create_dict(scenario_data)
scenario_yaml = yaml.dump(scenario_dict)
scenario_html = scenario_yaml.replace('\n', '<br>\n')
scenario_html = scenario_html.replace(', ', '<br>\n - ')
    scenario_html = scenario_html.replace(' ', '&nbsp;')
scenario_html = scenario_html.replace('[', '<br>\n - ')
scenario_html = scenario_html.replace(']', '\n')
return scenario_html
def create_dict(attr_dict):
main_dict = dict(attr_dict)
for key, value in six.iteritems(main_dict):
if type(value) == AttrDict:
main_dict[key] = create_dict(value)
return main_dict
def graph_appliance_measurements(graphs_path, ver, appliance_results, use_slab, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from cycler import cycler
starttime = time.time()
dates = appliance_results.keys()
total_memory_list = list(appliance_results[ts]['total'] for ts in appliance_results.keys())
free_memory_list = list(appliance_results[ts]['free'] for ts in appliance_results.keys())
used_memory_list = list(appliance_results[ts]['used'] for ts in appliance_results.keys())
buffers_memory_list = list(
appliance_results[ts]['buffers'] for ts in appliance_results.keys())
cache_memory_list = list(appliance_results[ts]['cached'] for ts in appliance_results.keys())
slab_memory_list = list(appliance_results[ts]['slab'] for ts in appliance_results.keys())
swap_total_list = list(appliance_results[ts]['swap_total'] for ts in
appliance_results.keys())
swap_free_list = list(appliance_results[ts]['swap_free'] for ts in appliance_results.keys())
# Stack Plot Memory Usage
file_name = graphs_path.join('{}-appliance_memory.png'.format(ver))
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'coral', 'steelblue',
'forestgreen'])
fig, ax = plt.subplots()
plt.title('Provider(s): {}\nAppliance Memory'.format(provider_names))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
if use_slab:
y = [used_memory_list, slab_memory_list, cache_memory_list, free_memory_list]
else:
y = [used_memory_list, buffers_memory_list, cache_memory_list, free_memory_list]
plt.stackplot(dates, *y, baseline='zero')
ax.annotate(str(round(total_memory_list[0], 2)), xy=(dates[0], total_memory_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(total_memory_list[-1], 2)), xy=(dates[-1], total_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
if use_slab:
ax.annotate(str(round(slab_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
slab_memory_list[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(slab_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1] +
slab_memory_list[-1]), xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
slab_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
dates[-1], used_memory_list[-1] + slab_memory_list[-1] + cache_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
else:
ax.annotate(str(round(buffers_memory_list[0], 2)), xy=(
dates[0], used_memory_list[0] + buffers_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(buffers_memory_list[-1], 2)), xy=(dates[-1],
used_memory_list[-1] + buffers_memory_list[-1]), xytext=(4, -4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
buffers_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
dates[-1], used_memory_list[-1] + buffers_memory_list[-1] + cache_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(used_memory_list[0], 2)), xy=(dates[0], used_memory_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(used_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
p2 = plt.Rectangle((0, 0), 1, 1, fc='coral')
p3 = plt.Rectangle((0, 0), 1, 1, fc='steelblue')
p4 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
if use_slab:
ax.legend([p1, p2, p3, p4], ['Used', 'Slab', 'Cached', 'Free'],
bbox_to_anchor=(1.45, 0.22), fancybox=True)
else:
ax.legend([p1, p2, p3, p4], ['Used', 'Buffers', 'Cached', 'Free'],
bbox_to_anchor=(1.45, 0.22), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
# Stack Plot Swap usage
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'forestgreen'])
file_name = graphs_path.join('{}-appliance_swap.png'.format(ver))
fig, ax = plt.subplots()
plt.title('Provider(s): {}\nAppliance Swap'.format(provider_names))
plt.xlabel('Date / Time')
plt.ylabel('Swap (MiB)')
swap_used_list = [t - f for f, t in zip(swap_free_list, swap_total_list)]
y = [swap_used_list, swap_free_list]
plt.stackplot(dates, *y, baseline='zero')
ax.annotate(str(round(swap_total_list[0], 2)), xy=(dates[0], swap_total_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_total_list[-1], 2)), xy=(dates[-1], swap_total_list[-1]),
xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(swap_used_list[0], 2)), xy=(dates[0], swap_used_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_used_list[-1], 2)), xy=(dates[-1], swap_used_list[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
p2 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
ax.legend([p1, p2], ['Used Swap', 'Free Swap'], bbox_to_anchor=(1.45, 0.22), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
# Reset Colors
mpl.rcdefaults()
timediff = time.time() - starttime
logger.info('Plotted Appliance Memory in: {}'.format(timediff))
def graph_all_miq_workers(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
file_name = graph_file_path.join('all-processes.png')
fig, ax = plt.subplots()
plt.title('Provider(s): {}\nAll Workers/Monitored Processes'.format(provider_names))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
for process_name in process_results:
if 'Worker' in process_name or 'Handler' in process_name or 'Catcher' in process_name:
for process_pid in process_results[process_name]:
dates = process_results[process_name][process_pid].keys()
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
plt.plot(dates, rss_samples, linewidth=1, label='{} {} RSS'.format(process_pid,
process_name))
plt.plot(dates, vss_samples, linewidth=1, label='{} {} VSS'.format(
process_pid, process_name))
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info('Plotted All Type/Process Memory in: {}'.format(timediff))
def graph_individual_process_measurements(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
for process_name in process_results:
for process_pid in process_results[process_name]:
file_name = graph_file_path.join('{}-{}.png'.format(process_name, process_pid))
dates = process_results[process_name][process_pid].keys()
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
pss_samples = list(process_results[process_name][process_pid][ts]['pss']
for ts in process_results[process_name][process_pid].keys())
uss_samples = list(process_results[process_name][process_pid][ts]['uss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
swap_samples = list(process_results[process_name][process_pid][ts]['swap']
for ts in process_results[process_name][process_pid].keys())
fig, ax = plt.subplots()
plt.title('Provider(s)/Size: {}\nProcess/Worker: {}\nPID: {}'.format(provider_names,
process_name, process_pid))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
plt.plot(dates, rss_samples, linewidth=1, label='RSS')
plt.plot(dates, pss_samples, linewidth=1, label='PSS')
plt.plot(dates, uss_samples, linewidth=1, label='USS')
plt.plot(dates, vss_samples, linewidth=1, label='VSS')
plt.plot(dates, swap_samples, linewidth=1, label='Swap')
if rss_samples:
ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1], rss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if pss_samples:
ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0], pss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1], pss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if uss_samples:
ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0], uss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1], uss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if vss_samples:
ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0], vss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1], vss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if swap_samples:
ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0], swap_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1], swap_samples[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info('Plotted Individual Process Memory in: {}'.format(timediff))
def graph_same_miq_workers(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
for process_name in process_results:
if len(process_results[process_name]) > 1:
            logger.debug('Plotting {} {} processes on a single graph.'.format(
len(process_results[process_name]), process_name))
file_name = graph_file_path.join('{}-all.png'.format(process_name))
fig, ax = plt.subplots()
pids = 'PIDs: '
for i, pid in enumerate(process_results[process_name], 1):
pids = '{}{}'.format(pids, '{},{}'.format(pid, [' ', '\n'][i % 6 == 0]))
pids = pids[0:-2]
plt.title('Provider: {}\nProcess/Worker: {}\n{}'.format(provider_names,
process_name, pids))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
for process_pid in process_results[process_name]:
dates = process_results[process_name][process_pid].keys()
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
pss_samples = list(process_results[process_name][process_pid][ts]['pss']
for ts in process_results[process_name][process_pid].keys())
uss_samples = list(process_results[process_name][process_pid][ts]['uss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
swap_samples = list(process_results[process_name][process_pid][ts]['swap']
for ts in process_results[process_name][process_pid].keys())
plt.plot(dates, rss_samples, linewidth=1, label='{} RSS'.format(process_pid))
plt.plot(dates, pss_samples, linewidth=1, label='{} PSS'.format(process_pid))
plt.plot(dates, uss_samples, linewidth=1, label='{} USS'.format(process_pid))
plt.plot(dates, vss_samples, linewidth=1, label='{} VSS'.format(process_pid))
plt.plot(dates, swap_samples, linewidth=1, label='{} SWAP'.format(process_pid))
if rss_samples:
ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1],
rss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if pss_samples:
ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0],
pss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1],
pss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if uss_samples:
ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0],
uss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1],
uss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if vss_samples:
ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0],
vss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1],
vss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if swap_samples:
ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0],
swap_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1],
swap_samples[-1]), xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info('Plotted Same Type/Process Memory in: {}'.format(timediff))
def summary_csv_measurement_dump(csv_file, process_results, measurement):
csv_file.write('---------------------------------------------\n')
csv_file.write('Per Process {} Memory Usage\n'.format(measurement.upper()))
csv_file.write('---------------------------------------------\n')
csv_file.write('Process/Worker Type,PID,Start of test,End of test\n')
for ordered_name in process_order:
if ordered_name in process_results:
for process_pid in sorted(process_results[ordered_name]):
start = process_results[ordered_name][process_pid].keys()[0]
end = process_results[ordered_name][process_pid].keys()[-1]
csv_file.write('{},{},{},{}\n'.format(ordered_name, process_pid,
round(process_results[ordered_name][process_pid][start][measurement], 2),
round(process_results[ordered_name][process_pid][end][measurement], 2)))
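# Illustrative sketch only (not part of the original module): the plotting and CSV
# helpers above all assume a nested mapping of the form
#   process_results[process_name][pid][timestamp] -> {'rss': .., 'pss': .., 'uss': .., 'vss': .., 'swap': ..}
# with samples inserted in chronological order. The helper below builds a minimal
# structure of that shape; the worker name, pid and values are made up for demonstration.
def _example_process_results():
    from collections import OrderedDict
    from datetime import datetime, timedelta
    start = datetime(2017, 1, 1, 12, 0, 0)
    samples = OrderedDict()
    for minute in range(3):
        samples[start + timedelta(minutes=minute)] = {
            'rss': 300.0 + minute, 'pss': 280.0 + minute, 'uss': 250.0 + minute,
            'vss': 900.0 + minute, 'swap': 0.0}
    # one worker type with a single monitored pid
    return {'MiqGenericWorker': {1234: samples}}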
|
gpl-2.0
|
pompiduskus/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
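# Optional sketch (not part of the original example): the reconstruction error printed
# above depends on n_neighbors. The helper below, using only what this example already
# imports, sweeps a few candidate values and prints the resulting errors; call it
# manually if you want to compare settings.
def sweep_n_neighbors(X, candidates=(5, 8, 12, 20)):
    for k in candidates:
        _, err_k = manifold.locally_linear_embedding(X, n_neighbors=k,
                                                     n_components=2)
        print("n_neighbors=%d reconstruction error: %g" % (k, err_k))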
|
bsd-3-clause
|
Eric89GXL/scipy
|
scipy/spatial/_spherical_voronoi.py
|
3
|
12556
|
"""
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as SciPy.
#
import numpy as np
import scipy
import itertools
from . import _voronoi
from scipy.spatial.distance import pdist
__all__ = ['SphericalVoronoi']
def sphere_check(points, radius, center):
""" Determines distance of generators from theoretical sphere
surface.
"""
actual_squared_radii = (((points[...,0] - center[0]) ** 2) +
((points[...,1] - center[1]) ** 2) +
((points[...,2] - center[2]) ** 2))
max_discrepancy = (np.sqrt(actual_squared_radii) - radius).max()
return abs(max_discrepancy)
def calc_circumcenters(tetrahedrons):
""" Calculates the cirumcenters of the circumspheres of tetrahedrons.
An implementation based on
http://mathworld.wolfram.com/Circumsphere.html
Parameters
----------
tetrahedrons : an array of shape (N, 4, 3)
consisting of N tetrahedrons defined by 4 points in 3D
Returns
----------
circumcenters : an array of shape (N, 3)
consisting of the N circumcenters of the tetrahedrons in 3D
"""
num = tetrahedrons.shape[0]
a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2)
sums = np.sum(tetrahedrons ** 2, axis=2)
d = np.concatenate((sums[:, :, np.newaxis], a), axis=2)
dx = np.delete(d, 1, axis=2)
dy = np.delete(d, 2, axis=2)
dz = np.delete(d, 3, axis=2)
dx = np.linalg.det(dx)
dy = -np.linalg.det(dy)
dz = np.linalg.det(dz)
a = np.linalg.det(a)
nominator = np.vstack((dx, dy, dz))
denominator = 2*a
return (nominator / denominator).T
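# Sketch (not part of the original module): a quick sanity check for calc_circumcenters.
# The four vertices below form a regular tetrahedron inscribed in a sphere centred at
# the origin, so the returned circumcenter should be (numerically) the origin.
def _check_circumcenter_of_regular_tetrahedron():
    verts = np.array([[[1.0, 1.0, 1.0],
                       [1.0, -1.0, -1.0],
                       [-1.0, 1.0, -1.0],
                       [-1.0, -1.0, 1.0]]])  # shape (1, 4, 3)
    cc = calc_circumcenters(verts)
    assert np.allclose(cc, np.zeros((1, 3)), atol=1e-10)
    return cc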
def project_to_sphere(points, center, radius):
"""
Projects the elements of points onto the sphere defined
by center and radius.
Parameters
----------
points : array of floats of shape (npoints, ndim)
consisting of the points in a space of dimension ndim
center : array of floats of shape (ndim,)
the center of the sphere to project on
radius : float
the radius of the sphere to project on
    Returns
    ----------
    array of floats of shape (npoints, ndim)
        the points projected onto the sphere
"""
lengths = scipy.spatial.distance.cdist(points, np.array([center]))
return (points - center) / lengths * radius + center
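# Sketch (not part of the original module): projecting a point that is off the sphere
# moves it along the ray from the center, so its distance to the center becomes exactly
# the requested radius.
def _project_to_sphere_example():
    pts = np.array([[0.5, 0.0, 0.0], [0.0, 3.0, 4.0]])
    center = np.zeros(3)
    projected = project_to_sphere(pts, center, 1.0)
    # both rows now have unit norm: [[1, 0, 0], [0, 0.6, 0.8]]
    assert np.allclose(np.linalg.norm(projected - center, axis=1), 1.0)
    return projected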
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, 3)
Coordinates of points from which to construct a spherical
Voronoi diagram.
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (3,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, 3)
the points in 3D to generate the Voronoi diagram from
radius : double
radius of the sphere
Default: None (forces estimation, which is less precise)
center : double array of shape (3,)
center of the sphere
Default: None (assumes sphere is centered at origin)
vertices : double array of shape (nvertices, 3)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
-----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
A 3D Delaunay tetrahedralization is obtained by including the origin of
the coordinate system as the fourth vertex of each simplex of the Convex
Hull. The circumcenters of all tetrahedra in the system are calculated and
projected to the surface of the sphere, producing the Voronoi vertices.
The Delaunay tetrahedralization neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
The surface area of spherical polygons is calculated by decomposing them
into triangles and using L'Huilier's Theorem to calculate the spherical
excess of each triangle [Weisstein]_. The sum of the spherical excesses is
multiplied by the square of the sphere radius to obtain the surface area
of the spherical polygon. For nearly-degenerate spherical polygons an area
of approximately 0 is returned by default, rather than attempting the
unstable calculation.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement). The reconstitution of the surface area of the
sphere, measured as the sum of the surface areas of all Voronoi regions,
is closest to 100 % for larger (>> 10) numbers of generators.
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web
Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
Do some imports and take some points on a cube:
>>> from matplotlib import colors
>>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
Calculate the spherical Voronoi diagram:
>>> radius = 1
>>> center = np.array([0, 0, 0])
>>> sv = SphericalVoronoi(points, radius, center)
Generate plot:
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... random_color = colors.rgb2hex(np.random.rand(3))
... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
... polygon.set_color(random_color)
... ax.add_collection3d(polygon)
>>> plt.show()
"""
def __init__(self, points, radius=None, center=None, threshold=1e-06):
self.points = points
if np.any(center):
self.center = center
else:
self.center = np.zeros(3)
if radius:
self.radius = radius
else:
self.radius = 1
if pdist(self.points).min() <= threshold * self.radius:
raise ValueError("Duplicate generators present.")
max_discrepancy = sphere_check(self.points,
self.radius,
self.center)
if max_discrepancy >= threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self.vertices = None
self.regions = None
self._tri = None
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# perform 3D Delaunay triangulation on data set
# (here ConvexHull can also be used, and is faster)
self._tri = scipy.spatial.ConvexHull(self.points)
# add the center to each of the simplices in tri to get the same
# tetrahedrons we'd have gotten from Delaunay tetrahedralization
# tetrahedrons will have shape: (2N-4, 4, 3)
tetrahedrons = self._tri.points[self._tri.simplices]
tetrahedrons = np.insert(
tetrahedrons,
3,
np.array([self.center]),
axis=1
)
# produce circumcenters of tetrahedrons from 3D Delaunay
# circumcenters will have shape: (2N-4, 3)
circumcenters = calc_circumcenters(tetrahedrons)
# project tetrahedron circumcenters to the surface of the sphere
# self.vertices will have shape: (2N-4, 3)
self.vertices = project_to_sphere(
circumcenters,
self.center,
self.radius
)
# calculate regions from triangulation
# simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(self._tri.simplices.shape[0])
# tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices, simplex_indices,
simplex_indices]).ravel()
# point_indices will have shape: (6N-12,)
point_indices = self._tri.simplices.ravel()
# array_associations will have shape: (6N-12, 2)
array_associations = np.dstack((point_indices, tri_indices))[0]
array_associations = array_associations[np.lexsort((
array_associations[...,1],
array_associations[...,0]))]
array_associations = array_associations.astype(np.intp)
# group by generator indices to produce
# unsorted regions in nested list
groups = [list(list(zip(*list(g)))[1])
for k, g in itertools.groupby(array_associations,
lambda t: t[0])]
self.regions = groups
def sort_vertices_of_regions(self):
"""Sort indices of the vertices to be (counter-)clockwise ordered.
Notes
-----
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the projected circumcenter of the
tetrahedron obtained by the k-th triangle in _tri.simplices (and the
origin). For each region n, we choose the first triangle (=Voronoi
vertex) in _tri.simplices and a vertex of that triangle not equal to
the center n. These determine a unique neighbor of that triangle,
which is then chosen as the second triangle. The second triangle
will have a unique vertex not equal to the current vertex or the
center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
_voronoi.sort_vertices_of_regions(self._tri.simplices, self.regions)
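# Sketch (not part of the original class): the Notes section above describes computing
# spherical polygon areas by splitting them into triangles and applying L'Huilier's
# theorem. The helper below shows that calculation for a single spherical triangle
# given by three unit vectors on a sphere of radius R; it is illustrative only and
# makes no claim about the class API. For example, the octant spanned by the three
# coordinate axes on the unit sphere has area pi/2.
def _spherical_triangle_area(v1, v2, v3, radius=1.0):
    def arc(u, w):
        # great-circle arc length between two unit vectors
        return np.arccos(np.clip(np.dot(u, w), -1.0, 1.0))
    a, b, c = arc(v2, v3), arc(v1, v3), arc(v1, v2)
    s = 0.5 * (a + b + c)
    # spherical excess from L'Huilier's theorem
    excess = 4.0 * np.arctan(np.sqrt(np.tan(0.5 * s) *
                                     np.tan(0.5 * (s - a)) *
                                     np.tan(0.5 * (s - b)) *
                                     np.tan(0.5 * (s - c))))
    return excess * radius ** 2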
|
bsd-3-clause
|
jcrist/blaze
|
blaze/compute/tests/test_comprehensive.py
|
11
|
4570
|
from __future__ import absolute_import, division, print_function
import numpy as np
from pandas import DataFrame
from odo import resource, into
from datashape.predicates import isscalar, iscollection, isrecord
from blaze.expr import symbol, by
from blaze.interactive import Data
from blaze.compute import compute
from blaze.expr.functions import sin, exp
sources = []
t = symbol('t', 'var * {amount: int64, id: int64, name: string}')
L = [[ 100, 1, 'Alice'],
[ 200, 2, 'Bob'],
[ 300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[ 500, 5, 'Edith']]
df = DataFrame(L, columns=['amount', 'id', 'name'])
x = into(np.ndarray, df)
sources = [df, x]
try:
import sqlalchemy
sql = resource('sqlite:///:memory:::accounts', dshape=t.dshape)
into(sql, L)
sources.append(sql)
except:
sql = None
try:
import bcolz
bc = into(bcolz.ctable, df)
sources.append(bc)
except ImportError:
bc = None
try:
import pymongo
except ImportError:
pymongo = mongo = None
if pymongo:
try:
db = pymongo.MongoClient().db
try:
coll = db._test_comprehensive
except AttributeError:
coll = db['_test_comprehensive']
coll.drop()
mongo = into(coll, df)
sources.append(mongo)
except pymongo.errors.ConnectionFailure:
mongo = None
# {expr: [list-of-exclusions]}
expressions = {
t: [],
t['id']: [],
abs(t['amount']): [],
t.id.max(): [],
t.amount.sum(): [],
t.amount.sum(keepdims=True): [],
t.amount.count(keepdims=True): [],
t.amount.nunique(keepdims=True): [mongo],
t.amount.nunique(): [],
t.amount.head(): [],
t.amount + 1: [mongo],
sin(t.amount): [sql, mongo], # sqlite doesn't support trig
exp(t.amount): [sql, mongo],
t.amount > 50: [mongo],
t[t.amount > 50]: [],
t.like(name='Alic*'): [],
t.sort('name'): [bc],
t.sort('name', ascending=False): [bc],
t.head(3): [],
t.name.distinct(): [],
t[t.amount > 50]['name']: [], # odd ordering issue
t.id.map(lambda x: x + 1, schema='int64', name='id'): [sql, mongo],
t[t.amount > 50]['name']: [],
by(t.name, total=t.amount.sum()): [],
by(t.id, count=t.id.count()): [],
by(t[['id', 'amount']], count=t.id.count()): [],
by(t[['id', 'amount']], total=(t.amount + 1).sum()): [mongo],
by(t[['id', 'amount']], n=t.name.nunique()): [mongo, bc],
by(t.id, count=t.amount.count()): [],
by(t.id, n=t.id.nunique()): [mongo, bc],
# by(t, count=t.count()): [],
# by(t.id, count=t.count()): [],
t[['amount', 'id']]: [x], # https://github.com/numpy/numpy/issues/3256
t[['id', 'amount']]: [x, bc], # bcolz sorting
t[0]: [sql, mongo, bc],
t[::2]: [sql, mongo, bc],
t.id.utcfromtimestamp: [sql],
t.distinct().nrows: [],
t.nelements(axis=0): [],
t.nelements(axis=None): [],
t.amount.truncate(200): [sql]
}
base = df
def df_eq(a, b):
return (list(a.columns) == list(b.columns)
# and list(a.dtypes) == list(b.dtypes)
and into(set, into(list, a)) == into(set, into(list, b)))
def typename(obj):
return type(obj).__name__
def test_base():
for expr, exclusions in expressions.items():
if iscollection(expr.dshape):
model = into(DataFrame, into(np.ndarray, expr._subs({t: Data(base, t.dshape)})))
else:
model = compute(expr._subs({t: Data(base, t.dshape)}))
print('\nexpr: %s\n' % expr)
for source in sources:
if id(source) in map(id, exclusions):
continue
print('%s <- %s' % (typename(model), typename(source)))
T = Data(source)
if iscollection(expr.dshape):
result = into(type(model), expr._subs({t: T}))
if isscalar(expr.dshape.measure):
assert set(into(list, result)) == set(into(list, model))
else:
assert df_eq(result, model)
elif isrecord(expr.dshape):
result = compute(expr._subs({t: T}))
assert into(tuple, result) == into(tuple, model)
else:
result = compute(expr._subs({t: T}))
try:
result = result.scalar()
except AttributeError:
pass
assert result == model
|
bsd-3-clause
|
dgary50/eovsa
|
fem_attn_calib.py
|
1
|
16015
|
#!/usr/bin/env python
# Hacked from Jim's dump_tsys_ext.py (pipeline:test_svn/python/, version 2016_may_25)
# 2016-08-01 BC: added rd_fem_attn() to read the FEM attenuation records fron the stateframe
# added cal_fem_gain() to convert FEM attenuation records to gain change,
# by taking a fem_attn_inc parameter from appropriate calibration
# added calc_fem_attn_inc() to calculate the additional FEM attenuation calibrations
# modified udbfile_create() to incorporate FEM attenuation corrections
import os
import numpy as np
from util import Time
import dump_tsys_ext
import dbutil as db
import read_idb as ri
import pdb
def fem_attn_anal(idb_calib='/dppdata1/IDB/IDB20160731231934/',doplot=False, wrt2sql=False):
import cal_header as ch
'''Calculate additional corrections to the FEM attenuators at each bit change (0, 1, 2, 4, 8, 16)dB;
Values are based on the measurement IDB20160731231934, the sequence is specified in
helios:Dropbox/PythonCode/Current/FEATTNTEST2.ctl:
1. FEMAUTO-OFF, 2. FEMATTN 15 (both 31 dB, to get the bkg),
3. Change the 1st H and V attn to 0, 1, 2, 4, 8, 16 dB every 30s, while keeping the 2nd to be 8 dB,
4. Change the 2nd H and V attn to 0, 1, 2, 4, 8, 16 dB every 30s, while keeping the 1st to be 8 dB
return value:
fem_attn_inc (nant, npol, # of FEM attn, 5):
additional corrections in dB w.r.t. the nominal values when bit changes'''
import matplotlib.pyplot as plt
out=ri.read_idb([idb_calib])
nant, npol, nf, nt=out['p'].shape
if doplot:
# show the auto-correlation, use it to find time indices for each attn state
f, ax = plt.subplots(5,3)
for i in range(15):
ax[i / 3, i % 3].imshow(out['p'][i,0])
# define time idx ranges
# each state lasted 30 s
tidxs=[25+i*30 for i in range(13)] #begin idx for avg
tidxe=[idx+20 for idx in tidxs] #end idx for avg
bkg=np.mean(out['p'][:,:,:,tidxs[0]:tidxe[0]],axis=3)
# measurements for 12 attn states
p_1=np.zeros([nant,npol,nf,6]) #power values for 6 states of 1st FEM attn change
p_2=np.zeros([nant,npol,nf,6]) #power values for 6 states of 2nd FEM attn change
rp_1=np.zeros([nant,npol,nf,6]) #ratio of the power regarding to the reference state
rp_2=np.zeros([nant,npol,nf,6])
rdb_1=np.zeros([nant,npol,nf,6]) #power ratio converted to dB
rdb_2=np.zeros([nant,npol,nf,6])
# nominal dB values
attns1=np.array([0.,1.,2.,4.,8.,16.])
attns2=np.array([0.,1.,2.,4.,8.,16.])
# reference state
attn_idx_ref=0 # 0 dB state
attns1-=attns1[attn_idx_ref]
attns2-=attns2[attn_idx_ref]
for i in range(12):
if i < 6:
p_1[:,:,:,i]=np.mean(out['p'][:,:,:,tidxs[i+1]:tidxe[i+1]],axis=3)-bkg
else:
p_2[:,:,:,i-6]=np.mean(out['p'][:,:,:,tidxs[i+1]:tidxe[i+1]],axis=3)-bkg
for i in range(6):
rp_1[:,:,:,i]=p_1[:,:,:,i]/p_1[:,:,:,attn_idx_ref]
rp_2[:,:,:,i]=p_2[:,:,:,i]/p_2[:,:,:,attn_idx_ref]
rdb_1=-10.*np.log10(rp_1)
rdb_2=-10.*np.log10(rp_2)
ddb_1=rdb_1-attns1 # additional dB correction wrt the nominal values, FEM attn 1
ddb_2=rdb_2-attns2 # additional dB correction wrt the nominal values, FEM attn 2
# plot the additional dB correction
if doplot:
f1, ax1 = plt.subplots(6,5)
for i in range(15):
ax1[i / 5, i % 5].imshow(ddb_1[i,0],vmin=-2,vmax=2)
ax1[i / 5+3, i % 5].imshow(ddb_1[i,1],vmin=-2,vmax=2)
f2, ax2 = plt.subplots(6,5)
for i in range(15):
ax2[i / 5, i % 5].imshow(ddb_2[i,0],vmin=-2,vmax=2)
ax2[i / 5+3, i % 5].imshow(ddb_2[i,1],vmin=-2,vmax=2)
# generate corrections for the nominal values
chran=[0,90] #range of frequency channels to average
fem_attn_bitv=np.zeros([16,npol,2,5],dtype=np.complex)
fem_attn_bitv[:,:,0,:]=np.nan_to_num(np.mean(rdb_1[:,:,:,1:],axis=2))+0j
fem_attn_bitv[:,:,1,:]=np.nan_to_num(np.mean(rdb_2[:,:,:,1:],axis=2))+0j
if wrt2sql:
ch.fem_attn_val2sql(fem_attn_bitv,t=Time(out['time'][0],format='jd'))
return fem_attn_bitv
def fem_attn_update(fem_attn, t=None, rdfromsql=True):
    '''Given a record of the frontend attenuation levels from the stateframe, recalculate the corrected attenuation levels.
    fem_attn: recorded attn levels in a 10-min duration
fem_attn_bitv: complex corrections to be applied to the data. Read from the stateframe or provided as a (16, 2, 2, 5) array'''
import cal_header as ch
import stateframe as stf
if rdfromsql:
xml, buf = ch.read_cal(7,t)
fem_attn_bitv=np.nan_to_num(stf.extract(buf, xml['FEM_Attn_Real'])) + np.nan_to_num(stf.extract(buf, xml['FEM_Attn_Imag'])) * 1j
h1=fem_attn['h1']
h2=fem_attn['h2']
v1=fem_attn['v1']
v2=fem_attn['v2']
attn=np.concatenate((np.concatenate((h1[...,None],v1[...,None]),axis=2)[...,None],
np.concatenate((h2[...,None],v2[...,None]),axis=2)[...,None]),axis=3)
# Start with 0 attenuation as reference
fem_attn_out=attn*0
# Calculate resulting attenuation based on bit attn values (1,2,4,8,16)
for i in range(5):
fem_attn_out = fem_attn_out + (np.bitwise_and(attn,2**i)>>i)*fem_attn_bitv[...,i]
#fem_gain=10.**(-fem_gain_db/10.)
return fem_attn_out
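# Sketch (not part of the original module): the loop above decomposes each commanded
# attenuation setting into its 1/2/4/8/16 dB bits and sums the per-bit calibration
# values. The toy example below mirrors that bookkeeping with a made-up correction
# table so the bit arithmetic is easy to follow.
def _fem_attn_bit_decomposition_example():
    nominal_bits = np.array([1., 2., 4., 8., 16.])  # dB value of each attenuator bit
    bitv = nominal_bits + 0j                        # pretend the corrections equal the nominal values
    attn_setting = 11                               # 11 dB -> bits 1 + 2 + 8
    total = 0j
    for i in range(5):
        total += ((attn_setting >> i) & 1) * bitv[i]
    return total                                    # 11+0j with this toy table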
def udbfile_create(filelist, ufilename, verbose=False):
'''Given a list of IDB filenames, create the appropriate UDB file, by
    averaging over energy bands, while keeping 1-second time resolution.
FEM attn values and corrections are taken from the stateframe'''
import aipy
if len(filelist) == 0:
print 'udbfile_create: No files input'
return []
#endif
# Be sure that files exist, and has all of the appropriate elements
filelist_test, ok_filelist, bad_filelist = dump_tsys_ext.valid_miriad_dataset(filelist)
if len(ok_filelist) == 0:
print 'udbfile_create: No valid files input'
return []
#endif
# Replicate rd_miriad_tsys_file here: Open first file and use that
# to replicate the NRV (non-record-variable) variables
uv = aipy.miriad.UV(ok_filelist[0])
src = uv['source']
scanid = uv['scanid']
nants = uv['nants']
# The assumption here is that all the variables are going to be
# there since the valid_miriad_dataset was invoked
uvout = aipy.miriad.UV(ufilename, 'new')
nrv_varlist_string = ['name', 'telescop', 'project', 'operator', 'version', 'source', 'scanid', 'proj', 'antlist', 'obstype']
for j in range(len(nrv_varlist_string)):
uvout.add_var(nrv_varlist_string[j], 'a')
uvout[nrv_varlist_string[j]] = uv[nrv_varlist_string[j]]
#endfor
nrv_varlist_int = ['nants', 'npol']
for j in range(len(nrv_varlist_int)):
uvout.add_var(nrv_varlist_int[j], 'i')
uvout[nrv_varlist_int[j]] = uv[nrv_varlist_int[j]]
#endfor
nrv_varlist_rl = ['vsource', 'veldop', 'inttime', 'epoch']
for j in range(len(nrv_varlist_rl)):
uvout.add_var(nrv_varlist_rl[j], 'r')
uvout[nrv_varlist_rl[j]] = uv[nrv_varlist_rl[j]]
#endfor
nrv_varlist_rl8 = ['freq', 'restfreq', 'antpos', 'ra', 'dec', 'obsra', 'obsdec']
for j in range(len(nrv_varlist_rl8)):
uvout.add_var(nrv_varlist_rl8[j], 'd')
uvout[nrv_varlist_rl8[j]] = uv[nrv_varlist_rl8[j]]
#endfor
#sfreq, sdf, nchan, nspect will change
#navg = 10
navg = 1
sfreq_in = uv['sfreq']
nchan_in = len(sfreq_in)
nch_avg = nchan_in/navg #will crash if this is not an integer
#arange gives me an array
a = np.arange(0, nchan_in, navg)
b = a+navg-1
#strip out zero frequencies
ppp = np.where(sfreq_in[a] > 0)
a = a[ppp]
b = b[ppp]
na = len(a)
indexmax = np.amax(np.where(sfreq_in > 0))
if b[na-1] > indexmax:
b[na-1] = indexmax
#end if
#Now you have start end end subscripts for the frequency bands
sfedg = sfreq_in[a]
sfedg = np.append(sfedg, sfreq_in[b[na-1]])
#bin midpoints
sfreq_out = 0.5*(sfreq_in[a]+sfreq_in[b])
sdf_out = sfreq_in[b]-sfreq_in[a]
#add these vars
uvout.add_var('nspect', 'i')
uvout['nspect'] = na
uvout.add_var('sfreq', 'd')
uvout['sfreq'] = sfreq_out
uvout.add_var('sdf', 'd')
uvout['sdf'] = sdf_out
uvout.add_var('sfedg', 'd')
uvout['sfedg'] = sfedg
#spectral windows
nschan = np.zeros(na, dtype=np.int)
ischan = np.zeros(na, dtype=np.int)
for j in range(na):
nschan[j] = 1
ischan[j] = j+1
#endfor
uvout.add_var('nschan', 'i')
uvout['nschan'] = nschan
uvout.add_var('ischan', 'i')
uvout['ischan'] = ischan
#define the record variables here
uvout.add_var('ut', 'd')
uvout.add_var('xtsys', 'r')
uvout.add_var('ytsys', 'r')
uvout.add_var('delay', 'd')
uvout.add_var('pol', 'i')
#version test
if 'xtsys' in uv.vartable:
version = "1.0"
else:
version = "2.0"
#endelse
# Loop over filenames, and add the other variables
init = False
init_pol = False
pol = -71 #dummy
ut = 0.0 #not necessarily the same ut variable as in the preamble
xcount = 0
utcount = 0
for filename in ok_filelist:
uv = aipy.miriad.UV(filename)
# generate FEM attn gain corrections
fem_attn=rd_fem_attn(uv)
fem_attn_out=fem_attn_update(fem_attn)
fem_gain=10.**(-fem_attn_out/10.)
timejd=Time(fem_attn['timestamp'].astype('int'),format='lv').jd
if uv['source'] != src or uv['scanid'] != scanid:
print 'Source name:',uv['source'],'is different from initial source name:',src
print 'Or scanid:',uv['scanid'],'is different from initial source name:',scanid
print 'Will stop processing files.'
break
#endif
for preamble, data in uv.all():
# look up for gain correction in fem_gain
uvw, t, (ant1, ant2) = preamble
polstr=aipy.miriad.pol2str[uv['pol']]
tidx=np.abs(timejd-t).argmin()-1
if np.abs(timejd-t).min() < 1./24./3600. and (ant1 < 15) and (ant2 < 15):
if polstr[0] == 'x':
p1=0
if polstr[0] == 'y':
p1=1
gain1=fem_gain[tidx,ant1,p1,0]*fem_gain[tidx,ant1,p1,1]
if polstr[1] == 'x':
p2=0
if polstr[1] == 'y':
p2=1
gain2=fem_gain[tidx,ant2,p2,0]*fem_gain[tidx,ant2,p2,1]
#### additional background should be considered before converting dB changes to actual gain changes
                ### should really be data = (data - bkg) * 1./((gain1*gain2)**0.5) + bkg
data *= 1./((gain1*gain2)**0.5)
#if tidx % 100 == 0 and ant1 == ant2:
# print 'gain change at t, ant1, ant2, pol:', tidx, ant1, ant2, polstr, (gain1*gain2)**0.5
# Look for time change, correct for power and power square
if preamble[1] != ut or init == False:
if verbose:
print 'processed ',utcount,' times\r',
ut = preamble[1]
init = True
#power, power^2, and m
if version == "1.0":
xts = uv['xtsys']
xts.shape = (nchan_in, nants)
else:
xts = uv['xsampler'] #nfreq,nants,3; we only keep nfreq,nants
xts.shape = (nchan_in, nants, 3)
xts = xts[:,:,0] # the 1st element in the 3rd dimension is power
#power FEM attn gain correction for xx
pxgain=fem_gain[tidx,:,0,0]*fem_gain[tidx,:,0,1]
xts = xts/np.abs(pxgain)
#### additional background should be considered if it is not small enough
# obtain the background (if there is a measurement)
#if utcount == 75:
# xbkg=np.copy(xts)
#if utcount < 75:
# xts[:,:15] *= 1./np.abs(pxgain)
#else:
# xts[:,:15] = (xts[:,:15]-xbkg[:,:15])*1./np.abs(pxgain) + xbkg[:,:15]
#reshape and contract along axis
xts.shape = (nch_avg, navg, nants)
xts_flag = np.zeros_like(xts) #flag to account for nonzero values
ok = np.where(xts != 0)
xts_flag[ok] = 1.0
xts_new = np.sum(xts, axis=1, dtype=np.float32)
xts_flag_new = np.sum(xts_flag, axis=1, dtype=np.float32)
ok = np.where(xts_flag_new != 0)
xts_new[ok] = xts_new[ok]/xts_flag_new[ok]
#only keep the first na (# of spectral windows) values
xts_new = xts_new[:na]
xts_new.shape = (na*nants)
uvout['ut'] = uv['ut']
uvout['xtsys'] = xts_new
#ytsys
if version == "1.0":
yts = uv['ytsys']
yts.shape = (nchan_in, nants)
else:
yts = uv['ysampler'] #nfreq,nants,3; we only keep nfreq,nants
yts.shape = (nchan_in, nants, 3)
yts = yts[:,:,0] # the 1st element in the 3rd dimension is power
#power FEM attn gain correction for yy
pygain=fem_gain[tidx,:,1,0]*fem_gain[tidx,:,1,1]
yts = yts/np.abs(pygain)
#### additional background should be considered if it is not small enough
# obtain the background (if there is a measurement)
#if utcount == 75:
# ybkg=np.copy(yts)
#if utcount < 75:
# yts[:,:15] *= 1./np.abs(pygain)
#else:
# yts[:,:15] = (yts[:,:15]-ybkg[:,:15])*1./np.abs(pygain) + ybkg[:,:15]
#reshape and contract along axis
yts.shape = (nch_avg, navg, nants)
yts_flag = np.zeros_like(yts) #flag to account for nonzero values
ok = np.where(yts != 0)
yts_flag[ok] = 1.0
yts_new = np.sum(yts, axis=1, dtype=np.float32)
yts_flag_new = np.sum(yts_flag, axis=1, dtype=np.float32)
ok = np.where(yts_flag_new != 0)
yts_new[ok] = yts_new[ok]/yts_flag_new[ok]
yts_new = yts_new[:na, :]
yts_new.shape = (na*nants)
uvout['ut'] = uv['ut']
uvout['ytsys'] = yts_new
uvout['delay'] = uv['delay']
utcount = utcount+1
#output polarization if it has changed:
if uv['pol'] != pol or init_pol == False:
init_pol = True
pol = uv['pol']
uvout['pol'] = pol
uvout['ut'] = uv['ut']
#auto- and cross-correlation data
dflag = np.zeros(len(data))
ok_data = np.where(np.absolute(data) > 0)
dflag[ok_data] = 1.0
#shape and contract
data.shape = (nch_avg, navg)
dflag.shape = (nch_avg, navg)
dataout = np.sum(data, axis=1, dtype=np.complex64)
dflagout = np.sum(dflag, axis=1)
okij = np.where(dflagout != 0)
dataout[okij] = dataout[okij]/dflagout[okij]
#only keep first na values
dataout = dataout[:na]
uvout.write(preamble, dataout)
xcount=xcount+1
del(uv) #done
#print xcount, utcount
return ufilename
|
gpl-2.0
|
stylianos-kampakis/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
235
|
3891
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
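# Optional sketch (not part of the original example): the manual thresholding used above
# is the counterpart of calling predict() when an estimator's own contamination setting
# matches the assumed outlier fraction. The helper below contrasts the two routes on an
# already-fitted estimator; it is illustrative and is not called in this example.
def compare_predict_and_threshold(fitted_clf, X, outliers_fraction):
    scores = fitted_clf.decision_function(X).ravel()
    threshold = stats.scoreatpercentile(scores, 100 * outliers_fraction)
    inlier_by_threshold = scores > threshold
    inlier_by_predict = fitted_clf.predict(X).ravel() == 1  # predict() returns +1 / -1
    return inlier_by_threshold, inlier_by_predict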
|
bsd-3-clause
|
joelagnel/trappy
|
tests/test_results.py
|
3
|
4949
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys
import shutil
import tempfile
import matplotlib
import pandas as pd
import utils_tests
sys.path.append(os.path.join(utils_tests.TESTS_DIRECTORY, "..", "trappy"))
from trappy.wa import Result, get_results, combine_results
class TestResults(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(TestResults, self).__init__(
[("results.csv", "results.csv")],
*args, **kwargs)
def test_get_results(self):
results_frame = get_results()
self.assertEquals(type(results_frame), Result)
self.assertEquals(type(results_frame.columns), pd.core.index.MultiIndex)
self.assertEquals(results_frame["antutu"]["power_allocator"][0], 5)
self.assertEquals(results_frame["antutu"]["step_wise"][1], 9)
self.assertEquals(results_frame["antutu"]["step_wise"][2], 7)
self.assertEquals(results_frame["t-rex_offscreen"]["power_allocator"][0], 1777)
self.assertEquals(results_frame["geekbench"]["step_wise"][0], 8)
self.assertEquals(results_frame["geekbench"]["power_allocator"][1], 1)
self.assertAlmostEquals(results_frame["thechase"]["step_wise"][0], 242.0522258138)
def test_get_results_path(self):
"""get_results() can be given a directory for the results.csv"""
other_random_dir = tempfile.mkdtemp()
os.chdir(other_random_dir)
results_frame = get_results(self.out_dir)
self.assertEquals(len(results_frame.columns), 10)
def test_get_results_filename(self):
"""get_results() can be given a specific filename"""
old_path = os.path.join(self.out_dir, "results.csv")
new_path = os.path.join(self.out_dir, "new_results.csv")
os.rename(old_path, new_path)
results_frame = get_results(new_path)
self.assertEquals(len(results_frame.columns), 10)
def test_get_results_name(self):
"""get_results() optional name argument overrides the one in the results file"""
res = get_results(name="malkovich")
self.assertIsNotNone(res["antutu"]["malkovich"])
def test_combine_results(self):
res1 = get_results()
res2 = get_results()
# First split them
res1.drop('step_wise', axis=1, level=1, inplace=True)
res2.drop('power_allocator', axis=1, level=1, inplace=True)
# Now combine them again
combined = combine_results([res1, res2])
self.assertEquals(type(combined), Result)
self.assertEquals(combined["antutu"]["step_wise"][0], 4)
self.assertEquals(combined["antutu"]["power_allocator"][0], 5)
self.assertEquals(combined["geekbench"]["power_allocator"][1], 1)
self.assertEquals(combined["t-rex_offscreen"]["step_wise"][2], 424)
def test_plot_results_benchmark(self):
"""Test Result.plot_results_benchmark()
        Can't test it, so just check that it doesn't bomb
"""
res = get_results()
res.plot_results_benchmark("antutu")
res.plot_results_benchmark("t-rex_offscreen", title="Glbench TRex")
(_, _, y_min, y_max) = matplotlib.pyplot.axis()
trex_data = pd.concat(res["t-rex_offscreen"][s] for s in res["t-rex_offscreen"])
data_min = min(trex_data)
data_max = max(trex_data)
# Fail if the axes are within the limits of the data.
self.assertTrue(data_min > y_min)
self.assertTrue(data_max < y_max)
matplotlib.pyplot.close('all')
def test_get_run_number(self):
from trappy.wa.results import get_run_number
self.assertEquals(get_run_number("score_2"), (True, 2))
self.assertEquals(get_run_number("score"), (True, 0))
self.assertEquals(get_run_number("score 3"), (True, 3))
self.assertEquals(get_run_number("FPS_1"), (True, 1))
self.assertEquals(get_run_number("Overall_Score"), (True, 0))
self.assertEquals(get_run_number("Overall_Score_2"), (True, 1))
self.assertEquals(get_run_number("Memory_score")[0], False)
def test_plot_results(self):
"""Test Result.plot_results()
        Can't test it, so just check that it doesn't bomb
"""
res = get_results()
res.plot_results()
matplotlib.pyplot.close('all')
def test_init_fig(self):
r1 = get_results()
r1.init_fig()
|
apache-2.0
|
chrsrds/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
1
|
18861
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
import pytest
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator:
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert np.isfinite(samme_proba).all()
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1)))
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_classification_toy(algorithm):
# Check classification on a toy dataset.
clf = AdaBoostClassifier(algorithm=algorithm, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert clf.predict_proba(T).shape == (len(T), 2)
assert clf.decision_function(T).shape == (len(T),)
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert proba.shape[1] == len(classes)
assert clf.decision_function(iris.data).shape[1] == len(classes)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert len(clf.estimators_) > 1
# Check for distinct random states (see issue #7408)
assert (len(set(est.random_state for est in clf.estimators_)) ==
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert len(reg.estimators_) > 1
# Check for distinct random states (see issue #7408)
assert (len(set(est.random_state for est in reg.estimators_)) ==
len(reg.estimators_))
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_staged_predict(algorithm):
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
clf = AdaBoostClassifier(algorithm=algorithm, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert len(staged_predictions) == 10
assert_array_almost_equal(predictions, staged_predictions[-1])
assert len(staged_probas) == 10
assert_array_almost_equal(proba, staged_probas[-1])
assert len(staged_scores) == 10
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert len(staged_predictions) == 10
assert_array_almost_equal(predictions, staged_predictions[-1])
assert len(staged_scores) == 10
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert type(obj2) == obj.__class__
score2 = obj2.score(iris.data, iris.target)
assert score == score2
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert type(obj2) == obj.__class__
score2 = obj2.score(boston.data, boston.target)
assert score == score2
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert importances.shape[0] == 10
assert (importances[:3, np.newaxis] >= importances[3:]).all()
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super().fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_almost_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
        assert all(t in (csc_matrix, csr_matrix) for t in types)


def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super().fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
        assert all(t in (csc_matrix, csr_matrix) for t in types)


def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
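
    # DummyEstimator.fit accepts no sample_weight, so AdaBoostRegressor has to
    # fall back to its internal weighted resampling rather than pass weights
    # down to the base estimator.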
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert len(boost.estimator_weights_) == len(boost.estimator_errors_)


def test_multidimensional_X():
"""
Check that the AdaBoost estimators can work with n-dimensional
data matrix
"""
from sklearn.dummy import DummyClassifier, DummyRegressor
rng = np.random.RandomState(0)
X = rng.randn(50, 3, 3)
yc = rng.choice([0, 1], 50)
yr = rng.randn(50)
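    # The dummy estimators ignore the feature values entirely, so this only
    # exercises AdaBoost's own handling of a 3-D input array.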
boost = AdaBoostClassifier(DummyClassifier(strategy='most_frequent'))
boost.fit(X, yc)
boost.predict(X)
boost.predict_proba(X)
boost = AdaBoostRegressor(DummyRegressor())
boost.fit(X, yr)
boost.predict(X)


@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_adaboost_consistent_predict(algorithm):
# check that predict_proba and predict give consistent results
# regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/14084
X_train, X_test, y_train, y_test = train_test_split(
*datasets.load_digits(return_X_y=True), random_state=42
)
model = AdaBoostClassifier(algorithm=algorithm, random_state=42)
model.fit(X_train, y_train)
assert_array_equal(
np.argmax(model.predict_proba(X_test), axis=1),
model.predict(X_test)
)
|
bsd-3-clause
|