repo_name | path | copies | size | content | license
---|---|---|---|---|---|
AnasGhrab/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
297
|
1247
|
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
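# Both wrappers simply forward n_jobs; n_jobs=-1 lets joblib use all available
# CPU cores, so each figure compares single-core vs. fully parallel runtimes.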
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
|
bsd-3-clause
|
larsoner/mne-python
|
examples/decoding/plot_decoding_csp_eeg.py
|
15
|
5012
|
"""
.. _ex-decoding-csp-eeg:
===========================================================================
Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)
===========================================================================
Decoding of motor imagery applied to EEG data decomposed using CSP. A
classifier is then applied to features extracted on CSP-filtered signals.
See https://en.wikipedia.org/wiki/Common_spatial_pattern and
:footcite:`Koles1991`. The EEGBCI dataset is documented in
:footcite:`SchalkEtAl2004` and is available at PhysioNet
:footcite:`GoldbergerEtAl2000`.
"""
# Authors: Martin Billinger <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from mne import Epochs, pick_types, events_from_annotations
from mne.channels import make_standard_montage
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
print(__doc__)
###############################################################################
# Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
eegbci.standardize(raw) # set channel names
montage = make_standard_montage('standard_1005')
raw.set_montage(montage)
# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))
# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2
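# event codes 2 (hands) and 3 (feet) become class labels 0 and 1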
###############################################################################
# Classification with linear discriminant analysis
# Define a Monte Carlo cross-validation generator (reduce variance):
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
cv = ShuffleSplit(10, test_size=0.2, random_state=42)
cv_split = cv.split(epochs_data_train)
# Assemble a classifier
lda = LinearDiscriminantAnalysis()
csp = CSP(n_components=4, reg=None, log=True, norm_trace=False)
# Use scikit-learn Pipeline with cross_val_score function
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
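# class_balance is now the proportion of the majority class, i.e. the accuracy
# obtained by always predicting that class (the chance level reported below)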
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
csp.plot_patterns(epochs.info, ch_type='eeg', units='Patterns (AU)', size=1.5)
###############################################################################
# Look at performance over time
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
scores_windows = []
for train_idx, test_idx in cv_split:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
lda.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(lda.score(X_test, y_test))
scores_windows.append(score_this_window)
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
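# window centers converted from samples to seconds, relative to cue onset (t=0)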
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
##############################################################################
# References
# ----------
# .. footbibliography::
|
bsd-3-clause
|
ueshin/apache-spark
|
python/pyspark/pandas/tests/test_indexops_spark.py
|
15
|
2952
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pyspark.sql.utils import AnalysisException
from pyspark.sql import functions as F
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SparkIndexOpsMethodsTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_series_transform_negative(self):
with self.assertRaisesRegex(
ValueError, "The output of the function.* pyspark.sql.Column.*int"
):
self.psser.spark.transform(lambda scol: 1)
with self.assertRaisesRegex(AnalysisException, "cannot resolve.*non-existent.*"):
self.psser.spark.transform(lambda scol: F.col("non-existent"))
def test_multiindex_transform_negative(self):
with self.assertRaisesRegex(
NotImplementedError, "MultiIndex does not support spark.transform yet"
):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 1, 1, 1, 2, 1, 2, 2]],
)
s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
s.index.spark.transform(lambda scol: scol)
def test_series_apply_negative(self):
with self.assertRaisesRegex(
ValueError, "The output of the function.* pyspark.sql.Column.*int"
):
self.psser.spark.apply(lambda scol: 1)
with self.assertRaisesRegex(AnalysisException, "cannot resolve.*non-existent.*"):
self.psser.spark.transform(lambda scol: F.col("non-existent"))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_indexops_spark import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/lib/mpl_examples/animation/old_animation/histogram_tkagg.py
|
7
|
1879
|
"""
This example shows how to use a path patch to draw a bunch of
rectangles for an animated histogram
"""
import time
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # do this before importing pylab
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
fig = plt.figure()
ax = fig.add_subplot(111)
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
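# per rectangle the five vertices are (left, bottom) MOVETO, then (left, top),
# (right, top), (right, bottom) LINETO, plus one ignored CLOSEPOLY vertex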
nverts = nrects*(1+3+1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
verts[0::5,0] = left
verts[0::5,1] = bottom
verts[1::5,0] = left
verts[1::5,1] = top
verts[2::5,0] = right
verts[2::5,1] = top
verts[3::5,0] = right
verts[3::5,1] = bottom
barpath = path.Path(verts, codes)
patch = patches.PathPatch(barpath, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
def animate():
if animate.cnt>=100:
return
animate.cnt += 1
# simulate new data coming in
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
top = bottom + n
verts[1::5,1] = top
verts[2::5,1] = top
fig.canvas.draw()
fig.canvas.manager.window.after(100, animate)
animate.cnt = 0
fig.canvas.manager.window.after(100, animate)
plt.show()
|
mit
|
psathyrella/partis
|
bin/plot-time-required.py
|
1
|
7347
|
#!/usr/bin/env python
from collections import OrderedDict
import sys
import time
import os
from subprocess import Popen, PIPE, check_call, check_output, CalledProcessError
import argparse
import random
current_script_dir = os.path.dirname(os.path.realpath(__file__)).replace('/bin', '/python')
if not os.path.exists(current_script_dir):
print 'WARNING current script dir %s doesn\'t exist, so python path may not be correctly set' % current_script_dir
sys.path.insert(1, current_script_dir)
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--actions', required=True)
parser.add_argument('--timegrep', action='store_true')
args = parser.parse_args()
args.actions = utils.get_arg_list(args.actions)
fsdir = '/fh/fast/matsen_e/dralph/work/partis-dev/_output' + '/update-17'
# fsdir = '/fh/fast/matsen_e/processed-data/partis/clustering-paper/vollmers' #/021-018/
simfbase = 'simu-7-leaves-1.0-mutate'
simfbase_seed = 'simu-2.3-leaves-1.0-mutate-zipf'
human = '021-018'
istartstopstr_list_str = '0:250 250:750 750:1500 1500:2500 2500:4000 4000:6500 6500:9500 9500:13500 13500:18500 18500:26000 26000:36000 36000:51000 51000:71000 71000:101000' # 101000:141000 141000:191000 191000:266000 266000:366000 350000:500000 366000:516000'
istartstopstr_list_str_seed = '0:1500 1500:4500 4500:8500 8500:13500 13500:21000 21000:31000 51000:71000 71000:101000 101000:141000 141000:191000 191000:266000 266000:366000 366000:516000 516000:816000 816000:1316000 7:500007 500007:1000007 1000007:1500007 1316000:2066000 7:1000007'
istartstopstr_list = istartstopstr_list_str.split(' ')
istartstopstr_list_seed = istartstopstr_list_str_seed.split(' ')
istartstoplist = []
for istartstopstr in istartstopstr_list:
istartstoplist.append([int(iss) for iss in istartstopstr.split(':')])
n_query_list = [istartstop[1] - istartstop[0] for istartstop in istartstoplist]
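# number of query sequences in each istart:istop range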
istartstoplist_seed = []
for istartstopstr in istartstopstr_list_seed:
istartstoplist_seed.append([int(iss) for iss in istartstopstr.split(':')])
n_query_list_seed = [istartstop[1] - istartstop[0] for istartstop in istartstoplist_seed]
timeinfo = OrderedDict()
# NOTE edited some of these values for current code version Dec 22 2015
# NOTE I think the way this works is: use --timegrep to print the numbers, paste those numbers into this table [then maybe edit some of them], then uncomment and run make_plots (i.e. so you don't have to wait for timegrep to finish in order to replot)
# n_query_list = [ 100, 200, 500, 1000, 1500, 2000, 3000, 5000, 7000, 10000, 12000, 15000, 20000, 30000, 50000, 75000, 100000]
# timeinfo['vollmers-0.9'] = [ 30, 34, 43, 385, 396, 217, 398, 477, None, 1241, None, 1681, 2247, None, 4153, 5590, None]
# timeinfo['mixcr'] = [ 7, 7, 8, 9, 9, 10, 10, 11, None, 13, None, None, 16, None, 20, 20, None]
# timeinfo['changeo'] = [ 4, 4, 4, 6, None, 7, None, 9, None, 15, None, None, None, None, None, None, 457]
# timeinfo['vsearch-partition'] = [ 52, 53, 62, 70, 303, 408, 460, 477, None, 1208, None, 1586, 2561, None, 11209, 11413, None]
# timeinfo['naive-hamming-partition'] = [ 33, 39, 208, 93, 170, 216, 649, 947, None, 2372, None, 4655, None, None, None, None, None]
# timeinfo['partition'] = [ 53, 67, 138, 217, 1026, 1623, 2847, 3052, None, 8228, None, 11542, 21391, 31418, None, None, None]
# ----------------------------------------------------------------------------------------
def make_plot():
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from plotting import legends, colors, linewidths, linestyles, alphas, interpolate_values
fsize = 20
mpl.rcParams.update({
# 'font.size': fsize,
'legend.fontsize': 15,
'axes.titlesize': fsize,
# 'axes.labelsize': fsize,
'xtick.labelsize': fsize,
'ytick.labelsize': fsize,
'axes.labelsize': fsize
})
# sns.set_style('ticks') # hm, it actually works here
fig, ax = plt.subplots()
fig.tight_layout()
plt.gcf().subplots_adjust(bottom=0.16, left=0.2, right=0.78, top=0.95)
plots = {}
for meth, vals in timeinfo.items():
if vals.count(None) >= len(vals) - 1:
print ' not enough vals for %s' % meth
continue
interpolate_values(n_query_list, vals)
if meth == 'seed-partition':
nql = n_query_list_seed
else:
nql = n_query_list
print '%30s %s' % ('', ''.join([('%7d' % t) for t in nql]))
print '%30s %s' % (meth, ''.join([('%7.0f' % val) if val is not None else ' n ' for val in vals]))
plots[meth] = ax.plot(nql, vals, linewidth=linewidths.get(meth, 4), label=legends.get(meth, meth), color=colors.get(meth, 'grey'), linestyle=linestyles.get(meth, 'solid'), alpha=alphas.get(meth, 1.)) #, markersize=1000)
legend = ax.legend(loc=(.04, .64)) # 'upper left')
sns.despine() #trim=True, bottom=True)
plt.xlabel('sample size')
plt.ylabel('time required')
plt.subplots_adjust(bottom=0.14, left=0.18)
ax.set_xscale('log')
ax.set_yscale('log')
ax.grid(True)
yticks = [1, 60, 3600, 86400, 604800] # seconds
yticklabels = ['1 sec', '1 min', '1 hour', '1 day', '1 week']
plt.yticks(yticks, yticklabels)
plt.savefig(os.getenv('www') + '/partis/clustering/time-required.svg')
sys.exit()
# ----------------------------------------------------------------------------------------
def get_clock_time(istart, istop, action, fbase):
# logfname = fsdir + '/' + human + '/istartstop-' + str(istart) + '-' + str(istop) + '/_logs/' + fbase + '-' + action + '.out'
logfname = fsdir + '/' + human + '/istartstop-' + str(istart) + '-' + str(istop) + '/logs/' + fbase + '-' + action + '.out'
if args.timegrep:
# check_call(['ls', '-ltrh', logfname])
try:
outstr = check_output('grep \'total time\|mixcr time\' ' + logfname, shell=True)
secs = float(outstr.split()[2])
return secs
# print '%5.0f,' % secs,
except CalledProcessError:
return None
# print '%5s,' % 'None',
# print ' %5d\n' % n_queries,
# ----------------------------------------------------------------------------------------
for action in args.actions:
aname = action
if action == 'run-viterbi':
aname = 'vollmers-0.9'
timeinfo[aname] = []
if action == 'seed-partition':
list_to_use = istartstoplist_seed
fbase = simfbase_seed
else:
list_to_use = istartstoplist
fbase = simfbase
for istart, istop in list_to_use:
timeinfo[aname].append(get_clock_time(istart, istop, action, fbase))
for meth, vals in timeinfo.items():
if meth == 'seed-partition':
nql = n_query_list_seed
else:
nql = n_query_list
print '%30s %s' % ('', ''.join([('%7d' % t) for t in nql]))
print '%30s %s' % (meth, ''.join([('%7.0f' % val) if val is not None else ' n ' for val in vals]))
if not args.timegrep:
make_plot()
|
gpl-3.0
|
paris-saclay-cds/python-workshop
|
Day_1_Scientific_Python/scikit-learn/fig_code/figures.py
|
34
|
8633
|
import numpy as np
import matplotlib.pyplot as plt
import warnings
def plot_venn_diagram():
fig, ax = plt.subplots(subplot_kw=dict(frameon=False, xticks=[], yticks=[]))
ax.add_patch(plt.Circle((0.3, 0.3), 0.3, fc='red', alpha=0.5))
ax.add_patch(plt.Circle((0.6, 0.3), 0.3, fc='blue', alpha=0.5))
ax.add_patch(plt.Rectangle((-0.1, -0.1), 1.1, 0.8, fc='none', ec='black'))
ax.text(0.2, 0.3, '$x$', size=30, ha='center', va='center')
ax.text(0.7, 0.3, '$y$', size=30, ha='center', va='center')
ax.text(0.0, 0.6, '$I$', size=30)
ax.axis('equal')
def plot_example_decision_tree():
fig = plt.figure(figsize=(10, 4))
ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[])
ax.set_title('Example Decision Tree: Animal Classification', size=24)
def text(ax, x, y, t, size=20, **kwargs):
ax.text(x, y, t,
ha='center', va='center', size=size,
bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs)
text(ax, 0.5, 0.9, "How big is\nthe animal?", 20)
text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18)
text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18)
text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14)
text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14)
text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14)
text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14)
text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4)
text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4)
text(ax, 0.21, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.34, 0.45, "no", 12, alpha=0.4)
text(ax, 0.66, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.79, 0.45, "no", 12, alpha=0.4)
ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k')
ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k')
ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k')
ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k')
ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k')
ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k')
ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k')
ax.axis([0, 1, 0, 1])
def visualize_tree(estimator, X, y, boundaries=True,
xlim=None, ylim=None):
estimator.fit(X, y)
if xlim is None:
xlim = (X[:, 0].min() - 0.1, X[:, 0].max() + 0.1)
if ylim is None:
ylim = (X[:, 1].min() - 0.1, X[:, 1].max() + 0.1)
x_min, x_max = xlim
y_min, y_max = ylim
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, alpha=0.2, cmap='rainbow')
plt.clim(y.min(), y.max())
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow')
plt.axis('off')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.clim(y.min(), y.max())
# Plot the decision boundaries
def plot_boundaries(i, xlim, ylim):
if i < 0:
return
tree = estimator.tree_
if tree.feature[i] == 0:
plt.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k')
plot_boundaries(tree.children_left[i],
[xlim[0], tree.threshold[i]], ylim)
plot_boundaries(tree.children_right[i],
[tree.threshold[i], xlim[1]], ylim)
elif tree.feature[i] == 1:
plt.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k')
plot_boundaries(tree.children_left[i], xlim,
[ylim[0], tree.threshold[i]])
plot_boundaries(tree.children_right[i], xlim,
[tree.threshold[i], ylim[1]])
if boundaries:
plot_boundaries(0, plt.xlim(), plt.ylim())
def plot_tree_interactive(X, y):
from sklearn.tree import DecisionTreeClassifier
def interactive_tree(depth=1):
clf = DecisionTreeClassifier(max_depth=depth, random_state=0)
visualize_tree(clf, X, y)
from IPython.html.widgets import interact
return interact(interactive_tree, depth=[1, 5])
def plot_kmeans_interactive(min_clusters=1, max_clusters=6):
from IPython.html.widgets import interact
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.datasets.samples_generator import make_blobs
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
X, y = make_blobs(n_samples=300, centers=4,
random_state=0, cluster_std=0.60)
def _kmeans_step(frame=0, n_clusters=4):
rng = np.random.RandomState(2)
labels = np.zeros(X.shape[0])
centers = rng.randn(n_clusters, 2)
nsteps = frame // 3
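# each k-means iteration is animated over three frames: frame % 3 == 0 shows
# the current state, == 1 reassigns points to the nearest centroid, == 2 moves
# the centroids to the cluster means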
for i in range(nsteps + 1):
old_centers = centers
if i < nsteps or frame % 3 > 0:
dist = euclidean_distances(X, centers)
labels = dist.argmin(1)
if i < nsteps or frame % 3 > 1:
centers = np.array([X[labels == j].mean(0)
for j in range(n_clusters)])
nans = np.isnan(centers)
centers[nans] = old_centers[nans]
# plot the data and cluster centers
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='rainbow',
vmin=0, vmax=n_clusters - 1);
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c='black', s=50)
# plot new centers if third frame
if frame % 3 == 2:
for i in range(n_clusters):
plt.annotate('', centers[i], old_centers[i],
arrowprops=dict(arrowstyle='->', linewidth=1))
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c='black', s=50)
plt.xlim(-4, 4)
plt.ylim(-2, 10)
if frame % 3 == 1:
plt.text(3.8, 9.5, "1. Reassign points to nearest centroid",
ha='right', va='top', size=14)
elif frame % 3 == 2:
plt.text(3.8, 9.5, "2. Update centroids to cluster means",
ha='right', va='top', size=14)
return interact(_kmeans_step, frame=[0, 50],
n_clusters=[min_clusters, max_clusters])
def plot_image_components(x, coefficients=None, mean=0, components=None,
imshape=(8, 8), n_components=6, fontsize=12):
if coefficients is None:
coefficients = x
if components is None:
components = np.eye(len(coefficients), len(x))
mean = np.zeros_like(x) + mean
fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2))
g = plt.GridSpec(2, 5 + n_components, hspace=0.3)
def show(i, j, x, title=None):
ax = fig.add_subplot(g[i, j], xticks=[], yticks=[])
ax.imshow(x.reshape(imshape), interpolation='nearest')
if title:
ax.set_title(title, fontsize=fontsize)
show(slice(2), slice(2), x, "True")
approx = mean.copy()
show(0, 2, np.zeros_like(x) + mean, r'$\mu$')
show(1, 2, approx, r'$1 \cdot \mu$')
for i in range(0, n_components):
approx = approx + coefficients[i] * components[i]
show(0, i + 3, components[i], r'$c_{0}$'.format(i + 1))
show(1, i + 3, approx,
r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1))
plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom',
transform=plt.gca().transAxes, fontsize=fontsize)
show(slice(2), slice(-2, None), approx, "Approx")
def plot_pca_interactive(data, n_components=6):
from sklearn.decomposition import PCA
from IPython.html.widgets import interact
pca = PCA(n_components=n_components)
Xproj = pca.fit_transform(data)
def show_decomp(i=0):
plot_image_components(data[i], Xproj[i],
pca.mean_, pca.components_)
interact(show_decomp, i=(0, data.shape[0] - 1));
|
bsd-3-clause
|
lixun910/pysal
|
pysal/model/spvcm/both_levels/tests/test_generic.py
|
1
|
2084
|
from pysal.model.spvcm import both_levels as M
from pysal.model.spvcm import utils
from pysal.model.spvcm._constants import RTOL, ATOL, TEST_SEED, CLASSTYPES
from pysal.model.spvcm.tests.utils import Model_Mixin, run_with_seed
from pysal.model.spvcm.abstracts import Sampler_Mixin, Trace
import unittest as ut
import numpy as np
import pandas as pd
import os
import copy
FULL_PATH = os.path.dirname(os.path.abspath(__file__))
class Test_Generic(ut.TestCase, Model_Mixin):
def setUp(self):
super(Test_Generic, self).build_self()
self.cls = M.Generic
self.inputs['n_samples'] = 0
instance = self.cls(**self.inputs)
self.answer_trace = Trace.from_csv(FULL_PATH + '/data/generic.csv')
@ut.skip #
def test_mvcm(self):
instance = self.cls(**self.inputs)
np.random.seed(TEST_SEED)
instance.draw()
other_answers = Trace.from_csv(FULL_PATH + '/data/mvcm.csv')
strip_out = [col for col in instance.trace.varnames if col not in other_answers.varnames]
other_answers._assert_allclose(instance.trace.drop(
*strip_out, inplace=False),
rtol=RTOL, atol=ATOL)
def test_membership_delta_mismatch(self):
bad_D = np.ones(self.X.shape)
try:
self.cls(**self.inputs)
except UserWarning:
pass
def test_weights_mismatch(self):
local_input = copy.deepcopy(self.inputs)
local_input['W_'] = local_input['M']
local_input['M'] = local_input['W']
local_input['W'] = copy.deepcopy(local_input['W_'])
del local_input['W_']
try:
local_input['n_samples'] = 0
self.cls(**local_input)
except (UserWarning, AssertionError):
pass
def test_missing_membership(self):
local_input = copy.deepcopy(self.inputs)
del local_input['membership']
try:
local_input['n_samples'] = 0
self.cls(**local_input)
except UserWarning:
pass
|
bsd-3-clause
|
rajat1994/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
127
|
40813
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e., the percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
# even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
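# Interleave the samples: even indices for training, odd indices for testing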
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test precision-recall curve and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various cases
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
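# scores decrease with the label index: [n_labels - 1, n_labels - 2, ..., 0]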
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account:
# two labels tied for rank 1 should both get rank 2.
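# e.g. y_score [0.5, 0.5, 0.25]: raw ranks [1, 1, 2] become corrected ranks [2, 2, 3]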
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the relevant labels ranked at least as well as this one
# (smaller or equal rank, the label itself included).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
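# Illustrative sanity check (mirrors check_lrap_toy above):
#   _my_lrap([[1, 0, 1]], [[0.25, 0.5, 0.75]]) should equal (2 / 3 + 1) / 2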
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
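# Illustrative sketch (added, not part of the original test module): a naive
# reference implementation of coverage error for dense binary y_true, handy for
# verifying the toy expectations above by hand. For each sample it takes the
# worst rank of any relevant label, where the rank of a score is the number of
# labels scored at least as high; samples without relevant labels contribute 0.
def _naive_coverage_error(y_true, y_score):
    y_true = np.asarray(y_true)
    y_score = np.asarray(y_score)
    per_sample = []
    for truth, scores in zip(y_true, y_score):
        relevant = scores[truth == 1]
        if relevant.size == 0:
            per_sample.append(0)  # nothing to cover
            continue
        per_sample.append(max((scores >= s).sum() for s in relevant))
    return np.mean(per_sample)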
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
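# Illustrative sketch (added, not part of the original test module): a naive
# reference for label ranking loss on dense binary y_true. Per sample it counts
# the fraction of (relevant, irrelevant) label pairs where the irrelevant label
# scores at least as high as the relevant one (so ties count as mis-ordered);
# samples with no relevant or no irrelevant labels contribute 0, matching the
# "undefined metrics" cases asserted above.
def _naive_label_ranking_loss(y_true, y_score):
    y_true = np.asarray(y_true)
    y_score = np.asarray(y_score)
    losses = []
    for truth, scores in zip(y_true, y_score):
        pos = scores[truth == 1]
        neg = scores[truth == 0]
        if pos.size == 0 or neg.size == 0:
            losses.append(0.0)
            continue
        wrong = sum((neg >= p).sum() for p in pos)
        losses.append(wrong / float(pos.size * neg.size))
    return np.mean(losses)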
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
|
bsd-3-clause
|
BigBrother1984/android_external_chromium_org
|
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
|
24
|
10036
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python; look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
    raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
  # Check that the HOST (not the target) is 64-bit;
  # this emulates what msvs_env.bat does.
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
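# Example invocation (added for illustration; the flag names come from the parser
# above, the values and paths are hypothetical):
#   python buildbot_chrome_nacl_stage.py --mode=Release --bits=64 -j 4 \
#       --browser_path=/path/to/chrome --disable_tests=flaky_test1,flaky_test2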
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
|
bsd-3-clause
|
Soncrates/stock-study
|
test/testTemplate.py
|
1
|
1766
|
#!/usr/bin/python
import logging
import sys
import unittest
#import numpy.testing as np_test
#import pandas.util.testing as pd_test
import context
from libDecorators import log_on_exception, singleton
from libDebug import trace
def get_globals(*largs) :
ret = {}
for name in largs :
value = globals().get(name,None)
if value is None :
continue
ret[name] = value
return ret
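# Note (added for clarity): get_globals() returns a dict containing only those of
# the requested names that currently exist as module-level globals and are not
# None; missing names are silently skipped rather than raising.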
@singleton
class T() :
var_names = ['ini_files','file_list']
def __init__(self) :
values = get_globals(*T.var_names)
self.__dict__.update(**values)
class TemplateTest(unittest.TestCase):
def setUp(self, delete=False):
print("setup")
def tearDown(self, delete=False):
print('tearDown')
@unittest.skipIf(sys.platform.startswith("win"), "requires Windows")
def test_01_(self) :
print((1,sys.platform))
@unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
def test_02_(self) :
print((2,sys.platform))
@unittest.skip("demonstrating skipping")
def test_03_(self) :
print ("skip")
@unittest.expectedFailure
def test_fail(self):
self.assertEqual(1, 0, "broken")
if __name__ == '__main__' :
from libUtils import ENVIRONMENT
env = ENVIRONMENT.instance()
log_filename = '{pwd_parent}/log/{name}.log'.format(**vars(env))
log_msg = '%(module)s.%(funcName)s(%(lineno)s) %(levelname)s - %(message)s'
logging.basicConfig(filename=log_filename, filemode='w', format=log_msg, level=logging.INFO)
logging.basicConfig(stream=sys.stdout, format=log_msg, level=logging.INFO)
ini_list = env.list_filenames('local/*ini')
file_list = env.list_filenames('local/historical_prices/*pkl')
unittest.main()
|
lgpl-2.1
|
zooniverse/aggregation
|
analysis/old_weather2.py
|
2
|
2898
|
__author__ = 'greg'
mypath = "/home/greg/Databases/tests/"
from os import listdir
from os.path import isfile, join
import numpy as np
import cv2
import numpy
import PIL
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import math
onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) and "jpg" in f]
for fname in onlyfiles:
print fname
img = cv2.imread(mypath+fname)
ref = 101,94,65
v = []
for x,a in enumerate(img):
for y,b in enumerate(a):
d = math.sqrt(sum([(a1-b1)**2 for a1,b1 in zip(ref,b)]))
if d < 150:
# plt.plot(y,-x,'o',color="blue")
v.append((y,-x))
# plt.show()
# continue
# print img
# # gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# # ret, thresh = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#
# cv2.imshow('image',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#
#
# # noise removal
# kernel = np.ones((3,3),np.uint8)
# opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
#
# # sure background area
# sure_bg = cv2.dilate(opening,kernel,iterations=3)
#
# # Finding sure foreground area
# dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,3)
# ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
#
# # Finding unknown region
# sure_fg = np.uint8(sure_fg)
# unknown = cv2.subtract(sure_bg,sure_fg)
# cv2.imwrite("/home/greg/image_processed.png",thresh)
#
# img = PIL.Image.open("/home/greg/image_processed.png").convert("L")
# arr = numpy.array(img)
#
#
# v = []
# for i,r in enumerate(arr):
# for j,c in enumerate(r):
# if c != 0:
# v.append((i,j))
#
# plt.plot(i,j,'o')
# plt.show()
# print len(v)
# import numpy as np
X = np.asarray(v)
db = DBSCAN(eps=1, min_samples=2).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
apache-2.0
|
FMNN01/FMNN01
|
python/homework-4/task4.py
|
1
|
2319
|
#!/usr/bin/env python3
# encoding: utf-8
import numpy as np
import scipy.linalg as sl
import matplotlib.pylab as plt
M = np.array([[ 5 , 0, 0, -1],
[ 1 , 0, -2, 1],
[-1.5, 1, -2, 1],
[-1 , 3, 1, -3]])
m = M.shape[0]
def A(p):
"""
Compute the matrix A(p).
:param p: the parameter
:type p: float
"""
diag = np.eye(m, dtype=bool)
off_diag = np.logical_not(diag)
N = np.copy(M)
N[off_diag] *= p
return N
def get_color(i):
"""
Return the i:th color.
"""
colors = ['red', 'blue', 'green', 'purple']
return colors[i % len(colors)]
# Save the eigenvalues as rows in a matrix called E. Consequently each
# column has the eigenvalues originating from a given center of a
# Gerschgorin disk.
P = np.linspace(0, 1, 1000)
eigenvalues = [sl.eig(A(p))[0] for p in P]
E = np.vstack(eigenvalues)
# Plot each eigenvalue trajectory, by extracting the real and imaginary
# part of each column of E.
for i in range(m):
X = E[:,i].real
Y = E[:,i].imag
c = get_color(i)
plt.plot(X, Y, color=c)
plt.plot(X[0], Y[0], 'bo', color=c)
plt.plot(X[-1], Y[-1], 'bo', color=c)
# We want to draw circles so keep the aspect ratio equal.
ax = plt.gca()
ax.set_aspect('equal', adjustable='box')
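# Background (added for clarity): by the Gerschgorin circle theorem, every
# eigenvalue of M lies in at least one disk centered at a diagonal entry M[i, i]
# with radius equal to the sum of the absolute values of the off-diagonal entries
# in row i. The loop below draws exactly these disks and grows the axis limits to
# contain them.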
# Draw all Gerschgorin circles and compute sensible limits for the axes.
xmin = float("inf")
xmax = float("-inf")
ymin = float("inf")
ymax = float("-inf")
for i in range(m):
# The center is the element on the diagonal.
center = (M[i,i].real, M[i,i].imag)
# The radius is the sum of the absolute value of the off-diagonal
# elements.
off_diag_on_row = np.ones(m, dtype=bool)
off_diag_on_row[i] = False
radius = sum(abs(M[i,:][off_diag_on_row]))
# Draw the circle.
circle = plt.Circle(
(M[i,i],0),
radius,
color=get_color(i),
fill=False)
ax.add_artist(circle)
# Update the axes limits so that this circle fits.
xmin = min(xmin, center[0] - radius)
xmax = max(xmax, center[0] + radius)
ymin = min(ymin, center[1] - radius)
ymax = max(ymax, center[1] + radius)
# Set the limits of the axes.
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# Add a grid and save the figure.
plt.grid()
plt.savefig('task4.eps')
|
gpl-3.0
|
estnltk/textclassifier
|
textclassifier/classify.py
|
1
|
2265
|
# -*- coding: utf-8 -*-
"""Command line program for classification.
"""
from __future__ import unicode_literals, print_function, absolute_import
from .utils import read_dataset, write_dataset
from .utils import check_filename, load_classifier
import argparse
import sys
import pandas as pd
import logging
import codecs
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('classify')
parser = argparse.ArgumentParser(prog='textclassifier.classify')
parser.add_argument(
'indata',
help=('Path for the input dataset that will be classified. It is possible to load .csv and .xlsx files.'))
parser.add_argument(
'outdata',
help = 'Path where the classified dataset will be stored. It is possible to save .csv and .xlsx files')
parser.add_argument(
'model',
help='The path of the classification model.')
parser.add_argument(
'--insheet',
default=0,
help='Sheet name if reading data from Excel file (default is the first sheet).')
parser.add_argument(
'--insep',
default = ',',
help='Column separator for reading CSV files (default is ,).')
parser.add_argument(
'--outsheet',
default='Sheet1',
help='Sheet name if saving as an Excel file (default is Sheet1).')
parser.add_argument(
'--outsep',
default = ',',
help='Column separator for saving CSV files (default is ,).')
class ClassificationApp(object):
def __init__(self, args):
self._args = args
def run(self):
args = self._args
if args.indata == args.outdata:
            print('Indata and outdata point to the same file. This is not allowed, in order to minimize the risk of overwriting the original training data.')
sys.exit(0)
check_filename(args.indata)
check_filename(args.outdata)
dataframe = read_dataset(args.indata, args.insep, args.insheet)
clf = load_classifier(args.model)
logger.info('Performing classification on {0} examples.'.format(dataframe.shape[0]))
dataframe = clf.classify(dataframe)
write_dataset(args.outdata, dataframe, args.outsep, args.outsheet)
logger.info('Done!')
if __name__ == '__main__':
app = ClassificationApp(parser.parse_args())
app.run()
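# Example invocation (added for illustration; file names are hypothetical, the
# options are the ones defined by the argument parser above):
#   python -m textclassifier.classify input.csv predictions.csv model.bin \
#       --insep ';' --outsep ';'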
|
gpl-2.0
|
MelanieBittl/dolfin
|
demo/undocumented/mesh-quality/python/demo_mesh-quality.py
|
3
|
1427
|
"This demo illustrates basic inspection of mesh quality."
# Copyright (C) 2013 Jan Blechta
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-11-19
# Last changed:
from __future__ import print_function
from dolfin import *
# Read mesh from file
mesh = Mesh("../dolfin_fine.xml.gz")
# Print minimal and maximal radius ratio
qmin, qmax = MeshQuality.radius_ratio_min_max(mesh)
print('Minimal radius ratio:', qmin)
print('Maximal radius ratio:', qmax)
# Show histogram using matplotlib
hist = MeshQuality.radius_ratio_matplotlib_histogram(mesh)
hist = hist.replace('import pylab', 'import matplotlib\n matplotlib.use(\'Agg\')\n import pylab')
hist = hist.replace('pylab.show()', 'pylab.savefig("mesh-quality.pdf")')
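# Note (added for clarity): the two replacements above force the generated
# histogram script to use the non-interactive Agg backend and to save the figure
# to mesh-quality.pdf instead of opening a window, so the demo also runs without
# a display.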
print(hist)
exec(hist)
# Show mesh
plot(mesh)
interactive()
|
gpl-3.0
|
mihail911/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/table.py
|
69
|
16757
|
"""
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning, the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division
import warnings
import artist
from artist import Artist
from patches import Rectangle
from cbook import is_string_like
from text import Text
from transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor,
)
self.set_clip_on(False)
# Create text object
if loc is None: loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
        'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
def draw(self, renderer):
if not self.get_visible(): return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
        Currently supports 'left', 'center' and 'right'.
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l,b,w,h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
    Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best' : 0,
'upper right' : 1, # default
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'center left' : 5,
'center right' : 6,
'lower center' : 7,
'upper center' : 8,
'center' : 9,
'top right' : 10,
'top left' : 11,
'bottom left' : 12,
'bottom right' : 13,
'right' : 14,
'left' : 15,
'top' : 16,
'bottom' : 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on bottom; valid locations are\n%s\t' %(loc, '\n\t'.join(self.codes.keys())))
loc = 'bottom'
if is_string_like(loc): loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0,0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return self.FONTSIZE/72.0*self.figure.dpi/self._axes.bbox.height * 1.2
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible(): return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
            bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x,mouseevent.y),{}
else:
return False,{}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [c.get_window_extent(renderer) for c in self._cells]
        return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns: continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x+ox)
c.set_y(y+oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l,b,w,h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw/w, rh/h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
# defaults for center
ox = (0.5-w/2)-l
oy = (0.5-h/2)-b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5-w/2)-l
if self._loc in (CL, CR, C): # center y
oy = (0.5-h/2)-b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
        cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0/cols] * cols
# Check row and column labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
            rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
offset = 0
if colLabels is None:
if colColours is not None:
            colLabels = [''] * cols
offset = 1
elif colColours is None:
colColours = 'w' * cols
offset = 1
    if colLabels is not None:
        assert len(colLabels) == cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row+offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row+offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
artist.kwdocd['Table'] = artist.kwdoc(Table)
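# Usage sketch (added for illustration, not part of the original module; the cell
# values are made up):
#   tbl = table(ax, cellText=[['1', '2'], ['3', '4']],
#               rowLabels=['row0', 'row1'], colLabels=['col0', 'col1'],
#               loc='bottom')
# This goes through the table() factory above, which builds a Table artist and
# attaches it to the axes with ax.add_table().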
|
gpl-3.0
|
lorenzo-desantis/mne-python
|
examples/time_frequency/plot_source_power_spectrum.py
|
19
|
1929
|
"""
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
|
bsd-3-clause
|
yanlend/scikit-learn
|
examples/linear_model/plot_sgd_penalties.py
|
249
|
1563
|
"""
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
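# Note (added for clarity): l1() and l2() above trace the unit contours
# |w0| + |w1| = 1 and w0**2 + w1**2 = 1, while el(x, z) solves the corresponding
# elastic-net contour for w1 given w0 and mixing parameter z. Its closed form
# divides by (2 - 4 * z), which is why alpha below is set to 0.501 rather than
# exactly 0.5.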
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
|
bsd-3-clause
|
blankenberg/tools-iuc
|
tools/heinz/heinz_scoring.py
|
21
|
3661
|
#!/usr/bin/env python
"""Calculate scores for Heinz.
This script transforms a P-value into a score:
1. Use alpha and lambda to calculate a threshold P-value.
2. Calculate a score for each P-value using alpha and the threshold.
For more details, please refer to the paper doi:10.1093/bioinformatics/btn161
Input:
P-values from DESeq2 result: first column: names, second column P-values
Output:
Scores, which will be used as the input of Heinz.
First column: names, second column: scores.
Python 3 is required.
"""
# Implemented by: Chao (Cico) Zhang
# Homepage: https://Hi-IT.org
# Date: 14 Mar 2017
# Last modified: 23 May 2018
import argparse
import sys
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Transform a P-value into a '
'score which can be used as the input of '
'Heinz')
parser.add_argument('-n', '--node', required=True, dest='nodes',
metavar='nodes_pvalue.txt', type=str,
help='Input file of nodes with P-values')
parser.add_argument('-f', '--fdr', required=True, dest='fdr',
metavar='0.007', type=float, help='Choose a value of FDR')
parser.add_argument('-m', '--model', required=False, dest='param_file',
metavar='param.txt', type=str,
help='A txt file contains model params as input')
parser.add_argument('-a', '--alpha', required=False, dest='alpha',
metavar='0.234', type=float, default=0.5,
help='Single parameter alpha as input if txt input is '
'not provided')
parser.add_argument('-l', '--lambda', required=False, dest='lam',
metavar='0.345', type=float, default=0.5,
help='Single parameter lambda as input if txt input is '
'not provided')
parser.add_argument('-o', '--output', required=True, dest='output',
metavar='scores.txt', type=str,
help='The output file to store the calculated scores')
args = parser.parse_args()
# Check if the parameters are complete
if args.output is None:
sys.exit('Output file is not designated.')
if args.nodes is None:
sys.exit('Nodes with p-values must be provided.')
if args.fdr is None:
sys.exit('FDR must be provided')
if args.fdr >= 1 or args.fdr <= 0:
    sys.exit('FDR must be greater than 0 and smaller than 1')
# run heinz-print according to the input type
if args.param_file is not None: # if BUM output is provided
with open(args.param_file) as p:
params = p.readlines()
lam = float(params[0]) # Maybe this is a bug
alpha = float(params[1]) # Maybe this is a bug
# if BUM output is not provided
elif args.alpha is not None and args.lam is not None:
lam = args.lam
alpha = args.alpha
else: # The input is not complete
sys.exit('The parameters of the model are incomplete.')
# Calculate the threshold P-value
pie = lam + (1 - lam) * alpha
p_threshold = np.power((pie - lam * args.fdr) / (args.fdr - lam * args.fdr),
1 / (alpha - 1))
print(p_threshold)
# Calculate the scores
input_pvalues = pd.read_csv(args.nodes, sep='\t', names=['node', 'pvalue'])
input_pvalues.loc[:, 'score'] = input_pvalues.pvalue.apply(lambda x:
(alpha - 1) * (np.log(x) - np.log(p_threshold)))
# print(input_pvalues.loc[:, ['node', 'score']])
input_pvalues.loc[:, ['node', 'score']].to_csv(args.output, sep='\t',
index=False, header=False)
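# Summary of the scoring above (added for clarity): with pie = lam + (1 - lam) * alpha,
# the threshold is p_threshold = ((pie - lam * FDR) / (FDR - lam * FDR)) ** (1 / (alpha - 1)),
# and each node gets score(p) = (alpha - 1) * (ln(p) - ln(p_threshold)). Since alpha is
# typically below 1 in the BUM model, P-values below the threshold receive positive scores.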
|
mit
|
charliememory/AutonomousDriving
|
CarND-Vehicle-Detection/src/feature.py
|
1
|
8521
|
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage.feature import hog
import os, pdb
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features
# Define a function to compute binned color features
def bin_spatial(img, size=(32, 32)):
# Use cv2.resize().ravel() to create the feature vector
features = cv2.resize(img, size).ravel()
# Return the feature vector
return features
# Define a function to compute color histogram features
# NEED TO CHANGE bins_range if reading .png files with mpimg!
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
    # Return the concatenated histogram feature vector
return hist_features
def color_convert_from_RGB(img, color_space):
if color_space == 'HSV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    elif color_space == 'LUV': # LUV can give negative values; HOG needs non-negative input
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    elif color_space == 'YUV': # YUV may also give negative values; HOG needs non-negative input
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
return feature_image
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than a list of images
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=True, hist_feat=True, hog_feat=True):
#1) Define an empty list to receive features
img_features = []
#2) Apply color conversion if other than 'RGB'
if color_space != 'RGB':
feature_image = color_convert_from_RGB(img, color_space)
# pdb.set_trace()
else: feature_image = np.copy(img)
#3) Compute spatial features if flag is set
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
#4) Append features to list
img_features.append(spatial_features)
#5) Compute histogram features if flag is set
if hist_feat == True:
hist_features = color_hist(feature_image, nbins=hist_bins)
#6) Append features to list
img_features.append(hist_features)
#7) Compute HOG features if flag is set
if hog_feat == True:
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.extend(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
#8) Append features to list
img_features.append(hog_features)
#9) Return concatenated array of features
return np.concatenate(img_features)
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(img_paths, param, data_aug=True):
# color_space='RGB', spatial_size=(32, 32),
# hist_bins=32, orient=9,
# pix_per_cell=8, cell_per_block=2, hog_channel=0,
# spatial_feat=True, hist_feat=True, hog_feat=True):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in img_paths:
file_features = []
# Read in each one by one
# image = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB)
image = mpimg.imread(file)
# # if file.endswith('jpg') or file.endswith('jpeg'):
# # image = image.astype(np.float32)/255
# if file.endswith('png'):
# image = image.astype(np.float32)*255
# pdb.set_trace()
file_features = single_img_features(image, param['color_space'], param['spatial_size'],
param['hist_bins'], param['orient'],
param['pix_per_cell'], param['cell_per_block'], param['hog_channel'],
param['spatial_feat'], param['hist_feat'], param['hog_feat'])
features.append(file_features)
if data_aug:
for file in img_paths:
file_features = []
# Read in each one by one
image = mpimg.imread(file)
# image = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB)
image = np.fliplr(image)
# if file.endswith('jpg') or file.endswith('jpeg'):
# image = image.astype(np.float32)/255
# if file.endswith('png'):
# image = image.astype(np.float32)*255
# pdb.set_trace()
file_features = single_img_features(image, param['color_space'], param['spatial_size'],
param['hist_bins'], param['orient'],
param['pix_per_cell'], param['cell_per_block'], param['hog_channel'],
param['spatial_feat'], param['hist_feat'], param['hog_feat'])
features.append(file_features)
# Return list of feature vectors
return features
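# Illustrative sketch (added, not part of the original file): extract_features()
# expects a `param` dict with the keys used above; the concrete values below are
# made-up defaults for illustration only, and glob is assumed to be imported.
# param = dict(color_space='YCrCb', spatial_size=(32, 32), hist_bins=32, orient=9,
#              pix_per_cell=8, cell_per_block=2, hog_channel='ALL',
#              spatial_feat=True, hist_feat=True, hog_feat=True)
# features = extract_features(glob.glob('vehicles/**/*.png'), param)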
def sample_hog_vis(car_img_path, notcar_img_path, param):
plt.subplot(1,4,1)
img = mpimg.imread(car_img_path)
# if car_img_path.endswith('png'):
# img = img.astype(np.float32)*255
# img = cv2.cvtColor(cv2.imread(car_img_path), cv2.COLOR_BGR2RGB)
plt.tight_layout()
plt.imshow(img)
plt.title('original')
feature_image = color_convert_from_RGB(img, param['color_space'])
for ch in range(3):
_, hog_image = get_hog_features(feature_image[:,:,ch], param['orient'], param['pix_per_cell'], param['cell_per_block'], vis=True, feature_vec=True)
plt.subplot(1,4,ch+2)
plt.imshow(hog_image)
plt.title('hog of channel %s'%param['color_space'][ch])
plt.savefig('car_hog.jpg', bbox_inches='tight', dpi=400)
plt.subplot(1,4,1)
img = mpimg.imread(notcar_img_path)
# if notcar_img_path.endswith('png'):
# img = img.astype(np.float32)*255
# img = cv2.cvtColor(cv2.imread(notcar_img_path), cv2.COLOR_BGR2RGB)
plt.tight_layout()
plt.imshow(img)
plt.title('original')
feature_image = color_convert_from_RGB(img, param['color_space'])
for ch in range(3):
_, hog_image = get_hog_features(feature_image[:,:,ch], param['orient'], param['pix_per_cell'], param['cell_per_block'], vis=True, feature_vec=True)
plt.subplot(1,4,ch+2)
plt.imshow(hog_image)
plt.title('hog of channel %s'%param['color_space'][ch])
plt.savefig('notcar_hog.jpg', bbox_inches='tight', dpi=400)
|
gpl-3.0
|
platinhom/ManualHom
|
Coding/Python/scipy-html-0.16.1/generated/scipy-stats-invgamma-1.py
|
1
|
1091
|
from scipy.stats import invgamma
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
# Calculate a few first moments:
a = 4.07
mean, var, skew, kurt = invgamma.stats(a, moments='mvsk')
# Display the probability density function (``pdf``):
x = np.linspace(invgamma.ppf(0.01, a),
invgamma.ppf(0.99, a), 100)
ax.plot(x, invgamma.pdf(x, a),
'r-', lw=5, alpha=0.6, label='invgamma pdf')
# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.
# Freeze the distribution and display the frozen ``pdf``:
rv = invgamma(a)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Check accuracy of ``cdf`` and ``ppf``:
vals = invgamma.ppf([0.001, 0.5, 0.999], a)
np.allclose([0.001, 0.5, 0.999], invgamma.cdf(vals, a))
# True
# Generate random numbers:
r = invgamma.rvs(a, size=1000)
# And compare the histogram:
ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
|
gpl-2.0
|
idlead/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
181
|
15664
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
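# Note (added for clarity, not part of the original test): with n_samples=100 and
# weights=[0.1, 0.25], the remaining weight 0.65 goes to the last class, which is
# why the expected class counts above are 10, 25 and 65.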
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
|
bsd-3-clause
|
I2Cvb/mp-mri-prostate
|
pipeline/feature-validation/exp-4/pipeline_validation_patients.py
|
1
|
4076
|
"""
This pipeline is used to report the results for the ADC modality.
"""
import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
#sns.set(style='ticks', palette='Set2')
current_palette = sns.color_palette("Set2", 10)
from scipy import interp
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from protoclass.data_management import GTModality
from protoclass.validation import labels_to_sensitivity_specificity
# Define the path where all the patients are
path_patients = '/data/prostate/experiments'
# Define the path of the ground for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
# Sort the list of patient
id_patient_list = sorted(id_patient_list)
for id_patient in id_patient_list:
# Append for the GT data - Note that we need a list of gt path
path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
for gt in path_gt])
# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
label = []
for idx_pat in range(len(id_patient_list)):
print 'Read patient {}'.format(id_patient_list[idx_pat])
# Create the corresponding ground-truth
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt,
path_patients_list_gt[idx_pat])
print 'Read the GT data for the current patient ...'
# Extract the corresponding ground-truth for the testing data
# Get the index corresponding to the ground-truth
roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
# Get the label of the gt only for the prostate ROI
gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
label.append(gt_cap[roi_prostate])
print 'Data and label extracted for the current patient ...'
testing_label_cv = []
# Go for LOPO cross-validation
for idx_lopo_cv in range(len(id_patient_list)):
# Display some information about the LOPO-CV
print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)
testing_label = np.ravel(label_binarize(label[idx_lopo_cv], [0, 255]))
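    # label_binarize with classes [0, 255] turns the ground-truth values into
    # a binary {0, 1} vector, with 1 marking the positive ('cap') voxels.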
testing_label_cv.append(testing_label)
fresults = '/data/prostate/results/mp-mri-prostate/exp-3/selection-extraction/rf/aggregation/results.pkl'
results = joblib.load(fresults)
percentiles = np.array([1., 2., 5., 7.5, 10, 12.5, 15.])
# Create an handle for the figure
fig = plt.figure()
ax = fig.add_subplot(111)
# Go for each cross-validation iteration
for idx_cv in range(len(testing_label_cv)):
# Print the information about the iteration in the cross-validation
print 'Iteration #{} of the cross-validation'.format(idx_cv+1)
# Get the prediction
pred_score = results[3][idx_cv][0]
classes = results[3][idx_cv][1]
pos_class_arg = np.ravel(np.argwhere(classes == 1))[0]
# Compute the fpr and tpr
fpr, tpr, thresh = roc_curve(testing_label_cv[idx_cv],
pred_score[:, pos_class_arg])
ax.plot(fpr, tpr, lw=2, label=r'AUC $ = {:1.3f}$'.format(auc(fpr, tpr)))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
#plt.title(r'ADC classification using the a subset of features using feature significance')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='lower right')#,
#bbox_to_anchor=(1.4, 0.1))
# Save the plot
plt.savefig('results/exp-4/plot_all_patients.pdf',
bbox_extra_artists=(lgd,),
bbox_inches='tight')
|
mit
|
thientu/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
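# concretely: X.shape == (20, 30), y.shape == (20, 3), gamma.shape == (30, 3)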
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
gtrensch/nest-simulator
|
pynest/examples/intrinsic_currents_spiking.py
|
8
|
6671
|
# -*- coding: utf-8 -*-
#
# intrinsic_currents_spiking.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
"""
Intrinsic currents spiking
--------------------------
This example illustrates a neuron receiving spiking input through
several different receptors (AMPA, NMDA, GABA_A, GABA_B), provoking
spike output. The model, ``ht_neuron``, also has intrinsic currents
(``I_NaP``, ``I_KNa``, ``I_T``, and ``I_h``). It is a slightly simplified implementation of
the neuron model proposed in [1]_.
The neuron is bombarded with spike trains from four Poisson generators,
which are connected to the AMPA, NMDA, GABA_A, and GABA_B receptors,
respectively.
References
~~~~~~~~~~
.. [1] Hill and Tononi (2005) Modeling sleep and wakefulness in the
thalamocortical system. J Neurophysiol 93:1671
http://dx.doi.org/10.1152/jn.00915.2004.
See Also
~~~~~~~~
:doc:`intrinsic_currents_subthreshold`
"""
###############################################################################
# We import all necessary modules for simulation, analysis and plotting.
import nest
import matplotlib.pyplot as plt
###############################################################################
# Additionally, we set the verbosity using ``set_verbosity`` to suppress info
# messages. We also reset the kernel to be sure to start with a clean NEST.
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# We define the simulation parameters:
#
# - The rate of the input spike trains
# - The weights of the different receptors (names must match receptor types)
# - The time to simulate
#
# Note that all parameter values should be doubles, since NEST expects doubles.
rate_in = 100.
w_recep = {'AMPA': 30., 'NMDA': 30., 'GABA_A': 5., 'GABA_B': 10.}
t_sim = 250.
num_recep = len(w_recep)
###############################################################################
# We create
#
# - one neuron instance
# - one Poisson generator instance for each synapse type
# - one multimeter to record from the neuron:
# - membrane potential
# - threshold potential
# - synaptic conductances
# - intrinsic currents
#
# See :doc:`intrinsic_currents_subthreshold` for more details on ``multimeter``
# configuration.
nrn = nest.Create('ht_neuron')
p_gens = nest.Create('poisson_generator', 4, params={'rate': rate_in})
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'theta',
'g_AMPA', 'g_NMDA',
'g_GABA_A', 'g_GABA_B',
'I_NaP', 'I_KNa', 'I_T', 'I_h']})
###############################################################################
# We now connect each Poisson generator with the neuron through a different
# receptor type.
#
# First, we need to obtain the numerical codes for the receptor types from
# the model. The ``receptor_types`` entry of the default dictionary for the
# ``ht_neuron`` model is a dictionary mapping receptor names to codes.
#
# In the loop, we use Python's tuple unpacking mechanism to unpack
# dictionary entries from our `w_recep` dictionary.
#
# Note that we pass ``p_gens[index]`` to ``Connect`` instead of iterating
# over the `p_gens` list directly, because iterating would make each element
# a "naked" node ID.
receptors = nest.GetDefaults('ht_neuron')['receptor_types']
for index, (rec_name, rec_wgt) in enumerate(w_recep.items()):
nest.Connect(p_gens[index], nrn, syn_spec={'receptor_type': receptors[rec_name], 'weight': rec_wgt})
###############################################################################
# We then connect the ``multimeter``. Note that the multimeter is connected to
# the neuron, not the other way around.
nest.Connect(mm, nrn)
###############################################################################
# We are now ready to simulate.
nest.Simulate(t_sim)
###############################################################################
# We now fetch the data recorded by the multimeter. The data are returned as
# a dictionary with entry ``times`` containing timestamps for all
# recorded data, plus one entry per recorded quantity.
# All data is contained in the ``events`` entry of the status dictionary
# returned by the multimeter.
data = mm.events
t = data['times']
###############################################################################
# The following function turns a name such as ``I_NaP`` into proper TeX code
# :math:`I_{\mathrm{NaP}}` for a pretty label.
def texify_name(name):
return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
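# For example, texify_name('I_NaP') returns '$I_{\mathrm{NaP}}$'.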
###############################################################################
# The next step is to plot the results. We create a new figure, and add one
# subplot each for membrane and threshold potential, synaptic conductances,
# and intrinsic currents.
fig = plt.figure()
Vax = fig.add_subplot(311)
Vax.plot(t, data['V_m'], 'b', lw=2, label=r'$V_m$')
Vax.plot(t, data['theta'], 'g', lw=2, label=r'$\Theta$')
Vax.set_ylabel('Potential [mV]')
try:
Vax.legend(fontsize='small')
except TypeError:
Vax.legend() # work-around for older Matplotlib versions
Vax.set_title('ht_neuron driven by Poisson processes')
Gax = fig.add_subplot(312)
for gname in ('g_AMPA', 'g_NMDA', 'g_GABA_A', 'g_GABA_B'):
Gax.plot(t, data[gname], lw=2, label=texify_name(gname))
try:
Gax.legend(fontsize='small')
except TypeError:
Gax.legend() # work-around for older Matplotlib versions
Gax.set_ylabel('Conductance [nS]')
Iax = fig.add_subplot(313)
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
try:
Iax.legend(fontsize='small')
except TypeError:
Iax.legend() # work-around for older Matplotlib versions
Iax.set_ylabel('Current [pA]')
Iax.set_xlabel('Time [ms]')
|
gpl-2.0
|
DougBurke/astropy
|
astropy/visualization/wcsaxes/ticklabels.py
|
1
|
7732
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
return [x for (y, x) in sorted(zip(Y, X))]
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.pad = 0.3
self._exclude_overlapping = False
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.text = {}
self.disp = {}
def add(self, axis, world, pixel, angle, text, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.text[axis] = [text]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
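                # For example, for consecutive labels '13d14m15s' and
                # '13d14m30s', the loop below shortens the second to '30s'.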
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith('$')
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = '$' + self.text[axis][i]
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def draw(self, renderer, bboxes, ticklabels_bbox):
if not self.get_visible():
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == '':
continue
self.set_text(self.text[axis][i])
x, y = self.pixel[axis][i]
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = - text_size * 0.5
dy = - text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = - text_size * 1.5
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = text_size * 0.5
dy = - text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = text_size * 0.2
self.set_position((x + dx, y + dy))
self.set_ha(ha)
self.set_va(va)
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * text_size * self.pad
dy += ddy * text_size * self.pad
self.set_position((x - dx, y - dy))
self.set_ha('center')
self.set_va('center')
bb = super().get_window_extent(renderer)
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
|
bsd-3-clause
|
Orpine/py-R-FCN
|
lib/pycocotools/coco.py
|
16
|
14881
|
__author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions, not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
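# A minimal usage sketch (the annotation path and the 'person' category below
# are only illustrative placeholders):
#
#   coco = COCO('annotations/instances_val2014.json')
#   cat_ids = coco.getCatIds(catNms=['person'])
#   img_ids = coco.getImgIds(catIds=cat_ids)
#   ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
#   anns = coco.loadAnns(ann_ids)
#   coco.showAnns(anns)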
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
# this can be changed by defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
tic = time.time()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
|
mit
|
MohammedWasim/scikit-learn
|
sklearn/ensemble/gradient_boosting.py
|
12
|
69795
|
"""Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.validation import NotFittedError
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
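        # The boolean mask casts to {0., 1.}, so the expression below is +1.0
        # where y > pred and -1.0 elsewhere, as stated in the docstring.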
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression estimates the percentiles of the conditional
    distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            # The (1 - alpha) terms enter with a minus sign because diff <= 0
            # where ~mask (i.e. where y <= pred).
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
              (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Loss functions that do not support probabilities raise a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
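        # Since residual = y - prob, (y - residual) equals prob and
        # (1 - y + residual) equals (1 - prob), i.e. sum(w * prob * (1 - prob)).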
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
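# Illustrative sketch (not part of the library): multinomial scores become class
# probabilities through a numerically stable softmax, which is what
# ``MultinomialDeviance._score_to_proba`` above computes via logsumexp.
# Standalone numpy demo using the equivalent max-shift trick.
import numpy as np

scores = np.array([[1.0, 2.0, 0.5],
                   [0.1, 0.1, 3.0]])                  # (n_samples, n_classes)
shifted = scores - scores.max(axis=1, keepdims=True)  # guards against overflow
proba = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
print(proba.sum(axis=1))                              # each row sums to 1
print(proba.argmax(axis=1))                           # same as _score_to_decision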
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
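# Illustrative sketch (not part of the library): under the exponential
# (AdaBoost) loss the predicted class is just the sign of the raw score, and
# the probability estimate uses expit(2 * score) as in ``_score_to_proba``
# above. Standalone numpy/scipy demo.
import numpy as np
from scipy.special import expit

score = np.array([-1.5, 0.2, 4.0])
print((score >= 0.0).astype(int))    # same rule as _score_to_decision
print(expit(2.0 * score))            # P(y=1 | x) under the exponential loss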
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1``, output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1, output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
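# Illustrative usage sketch (not part of the library): the reporter above is
# what prints the per-iteration table when ``verbose`` is set on an estimator.
# Standalone example; timings in the printed output will obviously differ.
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

X_demo, y_demo = make_regression(n_samples=500, n_features=10, random_state=0)
est = GradientBoostingRegressor(n_estimators=50, subsample=0.8, verbose=1,
                                random_state=0)
est.fit(X_demo, y_demo)  # prints Iter, Train Loss, OOB Improve, Remaining Time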
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
if presort:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples, n_classes]
The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
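# Illustrative usage sketch (not part of the library): ``staged_predict_proba``
# makes it cheap to track held-out deviance after every boosting stage, which
# is the usual way to choose ``n_estimators``. Standalone example.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split

X_demo, y_demo = make_classification(n_samples=1000, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, random_state=0)
clf = GradientBoostingClassifier(n_estimators=200, random_state=0)
clf.fit(X_tr, y_tr)
test_loss = [log_loss(y_te, p) for p in clf.staged_predict_proba(X_te)]
print(np.argmin(test_loss) + 1)  # stage count with the best held-out deviance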
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
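# Illustrative usage sketch (not part of the library): with ``warm_start=True``
# a second call to ``fit`` after raising ``n_estimators`` keeps the existing
# trees and only fits the additional stages (see ``_resize_state`` above).
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

X_demo, y_demo = make_regression(n_samples=500, n_features=10, random_state=0)
est = GradientBoostingRegressor(n_estimators=50, warm_start=True, random_state=0)
est.fit(X_demo, y_demo)
est.set_params(n_estimators=100)
est.fit(X_demo, y_demo)       # fits only the 50 new stages
print(est.estimators_.shape)  # (100, 1)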
|
bsd-3-clause
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/ensemble/tests/test_partial_dependence.py
|
365
|
6996
|
"""
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
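# Illustrative usage sketch (not part of the test module): the functions under
# test are typically used like this on a fitted gradient boosting model, here
# with the Boston housing data loaded above.
from sklearn import datasets
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import partial_dependence

boston_demo = datasets.load_boston()
est = GradientBoostingRegressor(n_estimators=50, random_state=0)
est.fit(boston_demo.data, boston_demo.target)
pdp, axes = partial_dependence(est, [5], X=boston_demo.data,  # feature 5 = 'RM'
                               grid_resolution=20)
print(pdp.shape, axes[0].shape)  # (1, 20) and (20,)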
|
mit
|
chrisjsewell/ipymd
|
ipymd/plotting/animation_examples/static_contour.py
|
1
|
1429
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 17 21:58:15 2016
@author: cjs14
"""
'''
Demonstrate use of a log color scale in contourf
'''
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
N = 100
x = np.linspace(-2.0, 2.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)
# A low hump with a spike coming out of the top right.
# Needs to have z/colour axis on a log scale so we see both hump and spike.
# linear scale only shows the spike.
z = (bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0)
+ 0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0))
# Put in some negative values (lower left corner) to cause trouble with logs:
z[:5, :5] = -1
# The following is not strictly essential, but it will eliminate
# a warning. Comment it out to see the warning.
z = ma.masked_where(z <= 0, z)
# Automatic selection of levels works; setting the
# log locator tells contourf to use a log scale:
cs = plt.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r, alpha=0.5)
# Alternatively, you can manually set the levels
# and the norm:
#lev_exp = np.arange(np.floor(np.log10(z.min())-1),
# np.ceil(np.log10(z.max())+1))
#levs = np.power(10, lev_exp)
#cs = P.contourf(X, Y, z, levs, norm=colors.LogNorm())
# The 'extend' kwarg does not work yet with a log scale.
cbar = plt.colorbar()
|
gpl-3.0
|
cancan101/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py
|
18
|
6444
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: int, number of threads used for reading and enqueueing.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
"""
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions.enqueue_data(
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
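# Illustrative usage sketch (not part of the module), written against the TF
# 1.x contrib.learn API (names may differ between versions): ``pandas_input_fn``
# wraps a DataFrame/Series pair into the ()->(features, target) callable that
# contrib.learn estimators expect; the callable is typically passed to an
# estimator's ``fit``/``evaluate`` rather than called directly.
import numpy as np
import pandas as pd
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io

x_df = pd.DataFrame({'a': np.arange(10.), 'b': np.arange(10.) * 2})
y_series = pd.Series(np.arange(10.) * 3)
input_fn = pandas_io.pandas_input_fn(x_df, y_series, batch_size=4,
                                     num_epochs=1, shuffle=False)
# features, target = input_fn()  # builds the queue-backed input tensors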
|
apache-2.0
|
warmlogic/happening
|
searchForGeo_twitter.py
|
1
|
1624
|
'''
search for geographic tweets
'''
import pandas as pd
import numpy as np
import cPickle
# # import MySQLdb as mdb
# import pymysql as mdb
import time
import twitter_tools
# from authent import dbauth as authsql
import pdb
# load beer names with >500 ratings
# sql='''
# SELECT beers.beername, beers.id
# FROM beers
# JOIN revstats ON beers.id=revstats.id
# WHERE revstats.nreviews>500;
# '''
# con=mdb.connect(**authsql)
# print 'Loading neighborhoods'
# df=pd.io.sql.read_frame(sql,con)
# beers=list(df['neighborhoods'])
# ids=list(df['id'])
# totalnum=len(beers)
# print 'Found %i beers'%totalnum
# # NB: tweets seem to come in from outside bounding box
# bayArea_bb_twit = [-122.75,36.8,-121.75,37.8] # from twitter's dev site
# bayArea_bb_me = [-122.53,36.94,-121.8,38.0] # I made this one
# searches twitter backwards in time
query = "since:2014-09-02 until:2014-09-03"
sf_center = "37.75,-122.44,4mi"
# count = 100
# results = twitter_tools.TwitSearchGeoOld(query,sf_center,count,twitter_tools.twitAPI)
count = 100
max_tweets = 1000
results = twitter_tools.TwitSearchGeo(query,sf_center,count,max_tweets,twitter_tools.twitAPI)
if len(results) > 0:
pdb.set_trace()
# # search twitter for beers and save out to dataframe
# count=0
# tweetholder=[]
# for bn in beers:
# searchstr='"'+bn+'"'
# print 'On %i of %i'%(count+1,totalnum)
# results = twittertools.TwitSearch(searchstr,twittertools.twitAPI)
# tweetholder.append(results)
# count+=1
print('Done.')
# save
# timeint = np.int(time.time())
# cPickle.dump(tweetholder,open('tweetsearch_%i.cpk'%timeint,'w'))
|
gpl-3.0
|
ligovirgo/seismon
|
seismon/bits.py
|
2
|
22194
|
#!/usr/bin/python
import os, glob, optparse, shutil, warnings, pickle, math, copy, pickle, matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal, scipy.stats
import seismon.NLNM, seismon.html
import seismon.eqmon, seismon.utils
try:
import gwpy.time, gwpy.timeseries
import gwpy.frequencyseries, gwpy.spectrogram
import gwpy.plotter
except:
print("gwpy import fails... no plotting possible.")
__author__ = "Michael Coughlin <[email protected]>"
__date__ = "2012/8/26"
__version__ = "0.1"
# =============================================================================
#
# DEFINITIONS
#
# =============================================================================
def save_data(params,channel,gpsStart,gpsEnd,data,attributeDics):
"""@saves spectral data in text files.
@param params
seismon params dictionary
@param channel
seismon channel structure
@param gpsStart
start gps
@param gpsEnd
end gps
@param data
spectral data structure
@param attributeDics
list of earthquake attribute dictionaries
"""
ifo = seismon.utils.getIfo(params)
psdDirectory = params["dirPath"] + "/Text_Files/PSD/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(psdDirectory)
fftDirectory = params["dirPath"] + "/Text_Files/FFT/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(fftDirectory)
timeseriesDirectory = params["dirPath"] + "/Text_Files/Timeseries/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(timeseriesDirectory)
earthquakesDirectory = params["dirPath"] + "/Text_Files/Earthquakes/" + channel.station_underscore + "/" + str(params["fftDuration"])
seismon.utils.mkdir(earthquakesDirectory)
freq = np.array(data["dataASD"].frequencies)
psdFile = os.path.join(psdDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
f = open(psdFile,"wb")
for i in range(len(freq)):
f.write("%e %e\n"%(freq[i],data["dataASD"][i]))
f.close()
freq = np.array(data["dataFFT"].frequencies)
fftFile = os.path.join(fftDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
f = open(fftFile,"wb")
for i in range(len(freq)):
f.write("%e %e %e\n"%(freq[i],data["dataFFT"].data[i].real,data["dataFFT"].data[i].imag))
f.close()
tt = np.array(data["dataLowpass"].times)
timeseriesFile = os.path.join(timeseriesDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
f = open(timeseriesFile,"wb")
f.write("%.10f %e\n"%(tt[np.argmin(data["dataLowpass"].data)],np.min(data["dataLowpass"].data)))
f.write("%.10f %e\n"%(tt[np.argmax(data["dataLowpass"].data)],np.max(data["dataLowpass"].data)))
f.close()
for attributeDic in attributeDics:
if params["ifo"] == "IRIS":
attributeDic = seismon.eqmon.ifotraveltimes(attributeDic, "IRIS", channel.latitude, channel.longitude)
traveltimes = attributeDic["traveltimes"]["IRIS"]
else:
traveltimes = attributeDic["traveltimes"][ifo]
Ptime = max(traveltimes["Ptimes"])
Stime = max(traveltimes["Stimes"])
Rtwotime = max(traveltimes["Rtwotimes"])
RthreePointFivetime = max(traveltimes["RthreePointFivetimes"])
Rfivetime = max(traveltimes["Rfivetimes"])
distance = max(traveltimes["Distances"])
indexes = np.intersect1d(np.where(tt >= Rfivetime)[0],np.where(tt <= Rtwotime)[0])
if len(indexes) == 0:
continue
indexMin = np.min(indexes)
indexMax = np.max(indexes)
ttCut = tt[indexes]
dataCut = data["dataLowpass"][indexMin:indexMax]
ampMax = np.max(dataCut.data)
ttMax = ttCut[np.argmax(dataCut.data)]
ttDiff = ttMax - attributeDic["GPS"]
velocity = distance / ttDiff
velocity = velocity / 1000.0
earthquakesFile = os.path.join(earthquakesDirectory,"%s.txt"%(attributeDic["eventName"]))
f = open(earthquakesFile,"wb")
f.write("%.10f %e %e %e %e\n"%(ttMax,ttDiff,distance,velocity,ampMax))
f.close()
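# --- Editor's sketch (not part of seismon): a minimal, self-contained example of the
# peak-velocity estimate used in save_data() above. Given an event GPS time, an
# epicentral distance in metres and a lowpassed time series, it picks the largest
# amplitude between the 5 km/s and 2 km/s surface-wave arrival times and converts the
# apparent speed to km/s. All names below are illustrative only.
def _example_peak_velocity(tt, data, gps_event, distance, Rfivetime, Rtwotime):
    """Return (ttMax, ampMax, velocity_km_s), or None if no samples fall in the window."""
    indexes = np.intersect1d(np.where(tt >= Rfivetime)[0], np.where(tt <= Rtwotime)[0])
    if len(indexes) == 0:
        return None
    dataCut = data[indexes]
    ampMax = np.max(dataCut)
    ttMax = tt[indexes][np.argmax(dataCut)]
    velocity = (distance / (ttMax - gps_event)) / 1000.0
    return ttMax, ampMax, velocity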
def bits(params, channel, segment):
"""@calculates spectral data for given channel and segment.
@param params
seismon params dictionary
@param channel
seismon channel structure
@param segment
[start,end] gps
"""
ifo = seismon.utils.getIfo(params)
gpsStart = segment[0]
gpsEnd = segment[1]
# set the times
duration = np.ceil(gpsEnd-gpsStart)
# make timeseries
state = seismon.utils.retrieve_bits(params, channel, segment)
flags = state.to_dqflags(round=True)
if params["doPlots"]:
plotDirectory = params["path"] + "/" + channel.station_underscore
seismon.utils.mkdir(plotDirectory)
pngFile = os.path.join(plotDirectory,"bits.png")
#plot = gwpy.plotter.TimeSeriesPlot(figsize=[14,8])
valid={'facecolor': 'red'}
plot = state.plot(valid=valid)
#plot.ylabel = r"Velocity [$\mu$m/s]"
#plot.title = channel.station.replace("_","\_")
#plot.xlim = xlim
#plot.ylim = ylim
#plot.add_legend(loc=1,prop={'size':10})
plot.save(pngFile)
plot.close()
def freq_analysis(params,channel,tt,freq,spectra):
"""@frequency analysis of spectral data.
@param params
seismon params dictionary
@param channel
seismon channel structure
@param tt
array of start times
@param freq
frequency vector
@param spectra
spectrogram structure
"""
if params["doPlots"]:
plotDirectory = params["path"] + "/" + channel.station_underscore + "/freq"
seismon.utils.mkdir(plotDirectory)
    # NOTE: only the final assignment below is effective; the earlier index
    # selections are kept for reference
    indexes = np.logspace(0,np.log10(len(freq)-1),num=100)
    indexes = list(np.unique(np.ceil(indexes)))
    indexes = range(len(freq))
    #indexes = range(16)
    indexes = np.where(10.0 >= freq)[0]
deltaT = tt[1] - tt[0]
n_dist = []
for j in range(1000):
n_dist.append(scipy.stats.chi2.rvs(2))
p_chi2_vals = []
p_ks_vals = []
ttCoh_vals = []
for i in indexes:
vals = spectra[:,i]
meanPSD = np.mean(vals)
stdPSD = np.std(vals)
vals_norm = 2 * vals / meanPSD
bins = np.arange(0,10,1)
(n,bins) = np.histogram(vals_norm,bins=bins)
n_total = np.sum(n)
bins = (bins[1:] + bins[:len(bins)-1])/2
n_expected = []
for bin in bins:
expected_val = n_total * scipy.stats.chi2.pdf(bin, 2)
n_expected.append(expected_val)
n_expected = np.array(n_expected)
(stat_chi2,p_chi2) = scipy.stats.mstats.chisquare(n, f_exp=n_expected)
p_chi2_vals.append(p_chi2)
(stat_ks,p_ks) = scipy.stats.ks_2samp(vals_norm, n_dist)
p_ks_vals.append(p_ks)
acov = np.correlate(vals,vals,"full")
acov = acov / np.max(acov)
ttCov = (np.arange(len(acov)) - len(acov)/2) * float(deltaT)
#ttLimitMin = - 5/freq[i]
#ttLimitMax = 5 /freq[i]
ttLimitMin = - float('inf')
ttLimitMax = float('inf')
        ttIndexes = np.intersect1d(np.where(ttCov >= ttLimitMin)[0],np.where(ttCov <= ttLimitMax)[0])
        #ttCov = ttCov / (60)
        if len(ttIndexes) == 0:
            # keep ttCoh_vals aligned with p_chi2_vals/p_ks_vals and skip this bin
            ttCoh_vals.append(np.nan)
            continue
        acov_minus_05 = np.absolute(acov[ttIndexes] - 0.66)
        index_min = acov_minus_05.argmin()
        ttCoh = np.absolute(ttCov[ttIndexes[index_min]])
        ttCoh_vals.append(ttCoh)
#if freq[i] > 0:
# continue
if params["doPlots"]:
ax = plt.subplot(111)
plt.plot(bins,n,label='true')
plt.plot(bins,n_expected,'k*',label='expected')
plt.xlabel("2 * data / mean")
plt.ylabel("Counts")
plot_title = "p-value: %f"%p_chi2
plt.title(plot_title)
plt.legend()
plt.show()
plt.savefig(os.path.join(plotDirectory,"%s_dist.png"%str(freq[i])),dpi=200)
plt.savefig(os.path.join(plotDirectory,"%s_dist.eps"%str(freq[i])),dpi=200)
plt.close('all')
ax = plt.subplot(111)
plt.semilogy(ttCov[ttIndexes],acov[ttIndexes])
plt.vlines(ttCoh,10**(-3),1,color='r')
plt.vlines(-ttCoh,10**(-3),1,color='r')
plt.xlabel("Time [Seconds]")
plt.ylabel("Correlation")
plt.show()
plt.savefig(os.path.join(plotDirectory,"%s_cov.png"%str(freq[i])),dpi=200)
plt.savefig(os.path.join(plotDirectory,"%s_cov.eps"%str(freq[i])),dpi=200)
plt.close('all')
if params["doPlots"]:
ax = plt.subplot(111)
plt.loglog(freq[indexes],p_chi2_vals,label='chi2')
plt.loglog(freq[indexes],p_ks_vals,label='k-s')
plt.xlabel("Frequency [Hz]")
plt.ylabel("p-value")
plt.legend(loc=3)
plt.show()
plt.savefig(os.path.join(plotDirectory,"freq_analysis.png"),dpi=200)
plt.savefig(os.path.join(plotDirectory,"freq_analysis.eps"),dpi=200)
plt.close('all')
ax = plt.subplot(111)
plt.semilogx(freq[indexes],ttCoh_vals)
plt.xlabel("Frequency [Hz]")
plt.ylabel("Coherence Time [s]")
plt.show()
plt.savefig(os.path.join(plotDirectory,"ttCohs.png"),dpi=200)
plt.savefig(os.path.join(plotDirectory,"ttCohs.eps"),dpi=200)
plt.close('all')
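# --- Editor's sketch (not part of seismon): the statistical check in freq_analysis()
# relies on the fact that, for Gaussian noise, twice the periodogram value divided by
# its mean follows a chi-squared distribution with 2 degrees of freedom. This
# self-contained example reproduces that check on synthetic data with a two-sample
# Kolmogorov-Smirnov test; names are illustrative only.
def _example_psd_chi2_check(n_segments=1000, seed=0):
    rng = np.random.RandomState(seed)
    # periodogram values of white Gaussian noise at a single frequency bin are
    # proportional to a chi2(2) variable (sum of squares of two Gaussians)
    vals = rng.randn(n_segments) ** 2 + rng.randn(n_segments) ** 2
    vals_norm = 2 * vals / np.mean(vals)
    reference = scipy.stats.chi2.rvs(2, size=n_segments, random_state=rng)
    stat_ks, p_ks = scipy.stats.ks_2samp(vals_norm, reference)
    return p_ks  # a large p-value is consistent with the chi2(2) model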
def analysis(params, channel):
"""@analysis of spectral data.
@param params
seismon params dictionary
@param channel
seismon channel structure
"""
psdDirectory = params["dirPath"] + "/Text_Files/PSD/" + channel.station_underscore + "/" + str(params["fftDuration"])
files = glob.glob(os.path.join(psdDirectory,"*.txt"))
files = sorted(files)
if not params["doFreqAnalysis"]:
if len(files) > 1000:
files = files[:1000]
#files = files[-10:]
tts = []
spectra = []
for file in files:
fileSplit = file.split("/")
txtFile = fileSplit[-1].replace(".txt","")
txtFileSplit = txtFile.split("-")
thisTTStart = int(txtFileSplit[0])
thisTTEnd = int(txtFileSplit[1])
tt = thisTTStart
if tt in tts:
continue
tts.append(tt)
spectra_out = gwpy.frequencyseries.Spectrum.read(file)
spectra_out.unit = 'counts/Hz^(1/2)'
spectra.append(spectra_out)
if tt == params["gpsStart"]:
spectraNow = spectra_out
if not 'spectraNow' in locals():
print("no data at requested time... continuing\n")
return
if np.mean(spectraNow.data) == 0.0:
print("data only zeroes... continuing\n")
return
dt = tts[1] - tts[0]
epoch = gwpy.time.Time(tts[0], format='gps')
specgram = gwpy.spectrogram.Spectrogram.from_spectra(*spectra, dt=dt,epoch=epoch)
freq = np.array(specgram.frequencies)
# Define bins for the spectral variation histogram
kwargs = {'log':True,'nbins':500,'norm':True}
#kwargs = {'log':True,'nbins':500}
specvar = gwpy.frequencyseries.hist.SpectralVariance.from_spectrogram(specgram,**kwargs)
bins = specvar.bins[:-1]
specvar = specvar * 100
if params["doFreqAnalysis"]:
        freq_analysis(params,channel,tts,freq,specgram)
# Calculate percentiles
spectral_variation_1per = specvar.percentile(1)
spectral_variation_10per = specvar.percentile(10)
spectral_variation_50per = specvar.percentile(50)
spectral_variation_90per = specvar.percentile(90)
spectral_variation_99per = specvar.percentile(99)
textDirectory = params["path"] + "/" + channel.station_underscore
seismon.utils.mkdir(textDirectory)
f = open(os.path.join(textDirectory,"spectra.txt"),"w")
for i in range(len(freq)):
f.write("%e %e %e %e %e %e %e\n"%(freq[i],spectral_variation_1per[i],spectral_variation_10per[i],spectral_variation_50per[i],spectral_variation_90per[i],spectral_variation_99per[i],spectraNow[i]))
f.close()
sigDict = {}
# Break up entire frequency band into 6 segments
ff_ave = [1/float(128), 1/float(64), 0.1, 1, 3, 5, 10]
f = open(os.path.join(textDirectory,"sig.txt"),"w")
for i in range(len(ff_ave)-1):
newSpectra = []
newSpectraNow = []
newFreq = []
for j in range(len(freq)):
if ff_ave[i] <= freq[j] and freq[j] <= ff_ave[i+1]:
newFreq.append(freq[j])
newSpectraNow.append(spectraNow.data[j])
if newSpectra == []:
newSpectra = specgram.data[:,j]
else:
newSpectra = np.vstack([newSpectra,specgram.data[:,j]])
newSpectra = np.array(newSpectra)
if len(newSpectra.shape) > 1:
newSpectra = np.mean(newSpectra, axis = 0)
sig, bgcolor = seismon.utils.html_bgcolor(np.mean(newSpectraNow),newSpectra)
f.write("%e %e %e %e %s\n"%(ff_ave[i],ff_ave[i+1],np.mean(newSpectraNow),sig,bgcolor))
key = "%s-%s"%(ff_ave[i],ff_ave[i+1])
dt = tts[-1] - tts[-2]
epoch = gwpy.time.Time(tts[0], format='gps')
timeseries = gwpy.timeseries.TimeSeries(newSpectra, epoch=epoch, sample_rate=1.0/dt)
sigDict[key] = {}
#timeseries.data = np.log10(timeseries.data)
sigDict[key]["data"] = timeseries
f.close()
if params["doPlots"]:
plotDirectory = params["path"] + "/" + channel.station_underscore
seismon.utils.mkdir(plotDirectory)
fl, low, fh, high = seismon.NLNM.NLNM(2)
pngFile = os.path.join(plotDirectory,"psd.png")
plot = spectraNow.plot()
kwargs = {"linestyle":"-","color":"k"}
plot.add_line(freq, spectral_variation_10per, **kwargs)
plot.add_line(freq, spectral_variation_50per, **kwargs)
plot.add_line(freq, spectral_variation_90per, **kwargs)
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins), np.max(bins)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"disp.png")
spectraNowDisplacement = spectraNow / freq
plot = spectraNowDisplacement.plot()
kwargs = {"linestyle":"-","color":"w"}
plot.add_line(freq, spectral_variation_10per/freq, **kwargs)
plot.add_line(freq, spectral_variation_50per/freq, **kwargs)
plot.add_line(freq, spectral_variation_90per/freq, **kwargs)
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low/fl, **kwargs)
plot.add_line(fh, high/fh, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins)/np.max(freq), np.max(bins)/np.min(freq)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Displacement Spectrum [m/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"tf.png")
specgramLog = specgram.to_logf(fmin=np.min(freq),fmax=np.max(freq))
plot = specgramLog.plot()
plot.ylim = [params["fmin"],params["fmax"]]
plot.ylabel = "Frequency [Hz]"
colorbar_label = "Amplitude Spectrum [(m/s)/rtHz]"
kwargs = {}
plot.add_colorbar(location='right', log=True, label=colorbar_label, clim=None, visible=True, **kwargs)
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"psd.png")
plot = spectraNow.plot()
kwargs = {"linestyle":"-","color":"k"}
plot.add_line(freq, spectral_variation_10per, **kwargs)
plot.add_line(freq, spectral_variation_50per, **kwargs)
plot.add_line(freq, spectral_variation_90per, **kwargs)
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins),np.max(bins)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"specvar.png")
kwargs = {"linestyle":"-","color":"w"}
#plot = specvar.plot(**kwargs)
plot = spectraNow.plot(**kwargs)
kwargs = {"linestyle":"-","color":"k"}
plot.add_line(freq, spectral_variation_10per, **kwargs)
plot.add_line(freq, spectral_variation_50per, **kwargs)
plot.add_line(freq, spectral_variation_90per, **kwargs)
extent = [np.min(freq), np.max(freq),
np.min(bins), np.max(bins)]
kwargs = {}
#plot.plot_variance(specvar, extent=extent, **kwargs)
plot.axes[0].set_xscale("log")
plot.axes[0].set_yscale("log")
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [np.min(bins), np.max(bins)]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.save(pngFile,dpi=200)
plot.close()
X,Y = np.meshgrid(freq, bins)
ax = plt.subplot(111)
#im = plt.pcolor(X,Y,np.transpose(spectral_variation_norm), cmap=plt.cm.jet)
im = plt.pcolor(X,Y,np.transpose(specvar.data), cmap=plt.cm.jet)
ax.set_xscale('log')
ax.set_yscale('log')
plt.semilogx(freq,spectraNow, 'k', label='Current')
plt.semilogx(freq,spectral_variation_10per,'w',label='10')
plt.semilogx(freq,spectral_variation_50per,'w',label='50')
plt.semilogx(freq,spectral_variation_90per,'w',label='90')
plt.loglog(fl,low,'k-.')
plt.loglog(fh,high,'k-.',label='LNM/HNM')
plt.xlim([params["fmin"],params["fmax"]])
plt.ylim([np.min(bins), np.max(bins)])
plt.xlabel("Frequency [Hz]")
plt.ylabel("Amplitude Spectrum [(m/s)/rtHz]")
plt.clim(0,5)
plt.grid()
plt.show()
plt.savefig(pngFile,dpi=200)
plt.close('all')
pngFile = os.path.join(plotDirectory,"bands.png")
plot = gwpy.plotter.TimeSeriesPlot()
for key in sigDict.iterkeys():
label = key
plot.add_timeseries(sigDict[key]["data"], label=label)
plot.axes[0].set_yscale("log")
plot.ylabel = "Average Amplitude Spectrum log10[(m/s)/rtHz]"
plot.add_legend(loc=1,prop={'size':10})
plot.save(pngFile,dpi=200)
plot.close()
htmlPage = seismon.html.seismon_page(channel,textDirectory)
if htmlPage is not None:
f = open(os.path.join(textDirectory,"psd.html"),"w")
f.write(htmlPage)
f.close()
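# --- Editor's sketch (not part of seismon): analysis() above builds a spectral-variation
# histogram with gwpy and reads off percentiles. The same summary can be sketched with
# plain numpy: given a (n_times, n_freqs) array of ASD values, take percentiles along
# the time axis for each frequency bin. Names are illustrative only.
def _example_spectral_variation(specgram_data, percentiles=(1, 10, 50, 90, 99)):
    """Return a dict mapping percentile -> array over frequency bins."""
    specgram_data = np.asarray(specgram_data, dtype=float)
    return {p: np.percentile(specgram_data, p, axis=0) for p in percentiles}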
def channel_summary(params, segment):
"""@summary of channels of spectral data.
@param params
seismon params dictionary
@param segment
[start,end] gps
"""
gpsStart = segment[0]
gpsEnd = segment[1]
data = {}
for channel in params["channels"]:
psdDirectory = params["dirPath"] + "/Text_Files/PSD/" + channel.station_underscore + "/" + str(params["fftDuration"])
file = os.path.join(psdDirectory,"%d-%d.txt"%(gpsStart,gpsEnd))
if not os.path.isfile(file):
continue
spectra_out = gwpy.frequencyseries.Spectrum.read(file)
spectra_out.unit = 'counts/Hz^(1/2)'
if np.sum(spectra_out.data) == 0.0:
continue
data[channel.station_underscore] = {}
data[channel.station_underscore]["data"] = spectra_out
if data == {}:
return
if params["doPlots"]:
plotDirectory = params["path"] + "/summary"
seismon.utils.mkdir(plotDirectory)
fl, low, fh, high = seismon.NLNM.NLNM(2)
pngFile = os.path.join(plotDirectory,"psd.png")
lowBin = np.inf
highBin = -np.inf
plot = gwpy.plotter.Plot(figsize=[14,8])
for key in data.iterkeys():
label = key.replace("_","\_")
plot.add_spectrum(data[key]["data"], label=label)
lowBin = np.min([lowBin,np.min(data[key]["data"])])
highBin = np.max([highBin,np.max(data[key]["data"])])
kwargs = {"linestyle":"-.","color":"k"}
plot.add_line(fl, low, **kwargs)
plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
plot.ylim = [lowBin, highBin]
plot.xlabel = "Frequency [Hz]"
plot.ylabel = "Amplitude Spectrum [(m/s)/rtHz]"
plot.add_legend(loc=1,prop={'size':10})
plot.axes[0].set_xscale("log")
plot.axes[0].set_yscale("log")
plot.save(pngFile,dpi=200)
plot.close()
pngFile = os.path.join(plotDirectory,"ratio.png")
lowBin = np.inf
highBin = -np.inf
ref = params["referenceChannel"].replace(":","_")
plot = gwpy.plotter.Plot(figsize=[14,8])
for key in data.iterkeys():
label = key.replace("_","\_")
plot.add_spectrum(data[key]["data"] / data[ref]["data"], label=label)
lowBin = np.min([lowBin,np.min(data[key]["data"])])
highBin = np.max([highBin,np.max(data[key]["data"])])
kwargs = {"linestyle":"-.","color":"k"}
#plot.add_line(fl, low, **kwargs)
#plot.add_line(fh, high, **kwargs)
plot.xlim = [params["fmin"],params["fmax"]]
#plot.ylim = [lowBin, highBin]
plot.xlabel = "Frequency [Hz]"
label_ref = params["referenceChannel"].replace("_","\_")
plot.ylabel = "Spectrum / Reference [%s]"%(label_ref)
plot.add_legend(loc=1,prop={'size':10})
plot.axes[0].set_xscale("log")
plot.axes[0].set_yscale("log")
plot.save(pngFile,dpi=200)
plot.close()
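# --- Editor's sketch (not part of seismon): channel_summary() above plots each channel's
# spectrum divided by a reference channel. A plain-numpy version of that ratio, assuming
# the spectra share a common frequency grid, looks like this; names are illustrative only.
def _example_reference_ratio(spectra_by_channel, reference_key):
    """spectra_by_channel: dict of channel name -> 1D numpy array on a common frequency grid."""
    ref = np.asarray(spectra_by_channel[reference_key], dtype=float)
    return {key: np.asarray(vals, dtype=float) / ref
            for key, vals in spectra_by_channel.items() if key != reference_key}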
|
gpl-3.0
|
jaidevd/scikit-learn
|
sklearn/cluster/k_means_.py
|
19
|
59631
|
"""K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
    ----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : numpy.RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
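# --- Editor's sketch (not part of scikit-learn): a simplified, self-contained version of
# the k-means++ seeding idea implemented in _k_init above. It draws one candidate per
# step with probability proportional to the squared distance to the closest chosen
# center, instead of the n_local_trials greedy search used above.
def _example_kmeanspp_seeding(X, n_clusters, random_state=0):
    rng = check_random_state(random_state)
    X = np.asarray(X, dtype=float)
    centers = [X[rng.randint(X.shape[0])]]
    for _ in range(1, n_clusters):
        # squared distance from every sample to its closest already-chosen center
        d2 = np.min(((X[:, None, :] - np.asarray(centers)[None, :, :]) ** 2).sum(axis=2), axis=1)
        probs = d2 / d2.sum()
        centers.append(X[rng.choice(X.shape[0], p=probs)])
    return np.asarray(centers)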
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# Validate init array
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
    # subtract the mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
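# --- Editor's sketch (not part of scikit-learn): minimal usage of the k_means function
# defined above on a tiny toy data set; the returned triple matches the docstring
# (centers, labels, inertia).
def _example_k_means_function():
    X = np.array([[1., 2.], [1., 4.], [1., 0.],
                  [4., 2.], [4., 4.], [4., 0.]])
    centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)
    return centers, labels, inertia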
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise ValueError("algorithm='elkan' not supported for sparse input X")
X = check_array(X, order="C")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X : array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode
x_squared_norms : array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
    # closest center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
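# --- Editor's sketch (not part of scikit-learn): the E-step / M-step structure of
# _kmeans_single_lloyd above, written as a bare-bones numpy loop. It omits tolerance
# handling, empty-cluster repair and the specialised Cython kernels; names are
# illustrative only.
def _example_lloyd_iterations(X, centers, max_iter=10):
    X = np.asarray(X, dtype=float)
    centers = np.asarray(centers, dtype=float).copy()
    for _ in range(max_iter):
        # E-step: assign every sample to its nearest center
        sq_dist = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
        labels = sq_dist.argmin(axis=1)
        # M-step: move each center to the mean of its assigned samples
        for c in range(centers.shape[0]):
            mask = labels == c
            if mask.any():
                centers[c] = X[mask].mean(axis=0)
    # final assignment and inertia with the updated centers
    sq_dist = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    labels = sq_dist.argmin(axis=1)
    inertia = sq_dist[np.arange(X.shape[0]), labels].sum()
    return labels, inertia, centers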
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
    # Break up nearest neighbor distance computation into batches to prevent
# memory blowup in the case of a large number of samples and clusters.
# TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
# cython k-means code assumes int32 inputs
labels = labels.astype(np.int32)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
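# --- Editor's sketch (not part of scikit-learn): the helper above is essentially the
# public pairwise_distances_argmin_min applied to (X, centers), with the inertia being
# the sum of the squared minimum distances. A small illustration using the public API:
def _example_labels_and_inertia(X, centers):
    from sklearn.metrics import pairwise_distances_argmin_min
    labels, mindist = pairwise_distances_argmin_min(X, centers)  # euclidean by default
    inertia = float((mindist ** 2).sum())
    return labels.astype(np.int32), inertia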
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms : array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers : float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances : float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels : int array of shape(n)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
    # set the default value of labels to -1 to be able to detect any anomaly
    # easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X : array, shape (n_samples, n_features)
k : int
number of centroids
init : {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms : array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on only a
        random subset of the data. This needs to be larger than k.
Returns
-------
centers : array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[ 1., 2.],
[ 4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
    -----
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
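# --- Editor's sketch (not part of scikit-learn): a compact usage example for the KMeans
# estimator defined above, exercising fit, transform (cluster-distance space), predict
# and score on a toy data set.
def _example_kmeans_usage():
    X = np.array([[1., 2.], [1., 4.], [1., 0.],
                  [4., 2.], [4., 4.], [4., 0.]])
    km = KMeans(n_clusters=2, random_state=0).fit(X)
    distances = km.transform(X)          # shape (n_samples, n_clusters)
    labels = km.predict([[0., 0.], [4., 4.]])
    score = km.score(X)                  # opposite of the inertia
    return distances, labels, score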
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
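# --- Editor's sketch (not part of scikit-learn): the dense branch of _mini_batch_step
# above keeps each center as a running mean. For a single center this reduces to the
# weighted update below, where `count` is the number of samples previously assigned to
# that center; names are illustrative only.
def _example_running_mean_update(center, count, batch_points):
    batch_points = np.asarray(batch_points, dtype=float)
    new_count = count + batch_points.shape[0]
    # undo the old scaling, add the new members, rescale by the updated count
    new_center = (np.asarray(center, dtype=float) * count + batch_points.sum(axis=0)) / new_count
    return new_center, new_count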
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
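# --- Editor's sketch (not part of scikit-learn): the convergence helper above smooths the
# per-batch inertia and center movement with an exponentially weighted average. The core
# recursion, with the same alpha as used above, is simply:
def _example_ewa_update(previous_ewa, new_value, batch_size, n_samples):
    alpha = min(1.0, float(batch_size) * 2.0 / (n_samples + 1))
    if previous_ewa is None:
        return new_value
    return previous_ewa * (1 - alpha) + new_value * alpha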
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that do not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on only a
        random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        nearest cluster center.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high-dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
            # this is the first call to partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
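# ----------------------------------------------------------------------------
# Editor's note: a minimal usage sketch of the estimator defined above (not
# part of the original module). It only exercises the public fit /
# partial_fit / predict API on arbitrary toy data and assumes ``np`` is the
# numpy module imported at the top of this file.
if __name__ == "__main__":
    demo_rng = np.random.RandomState(0)
    X_demo = demo_rng.rand(1000, 2)
    mbk = MiniBatchKMeans(n_clusters=3, batch_size=100, random_state=0)
    mbk.fit(X_demo)                        # chunked optimization on mini-batches
    demo_labels = mbk.predict(X_demo)      # nearest-center assignment per sample
    mbk.partial_fit(demo_rng.rand(50, 2))  # incremental update on one more batch
    print("demo inertia: %.3f" % mbk.inertia_)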
|
bsd-3-clause
|
msmbuilder/msmbuilder
|
msmbuilder/utils/param_sweep.py
|
1
|
1860
|
from __future__ import print_function, division, absolute_import
from sklearn import clone
try:
from sklearn.model_selection import ParameterGrid
except ImportError:
from sklearn.grid_search import ParameterGrid
from sklearn.externals.joblib import Parallel, delayed
__all__ = ['param_sweep']
def param_sweep(model, sequences, param_grid, n_jobs=1, verbose=0):
"""Fit a series of models over a range of parameters.
Parameters
----------
model : msmbuilder.BaseEstimator
An *instance* of an estimator to be used
to fit data.
sequences : list of array-like
List of sequences, or a single sequence. Each
sequence should be a 1D iterable of state
labels. Labels can be integers, strings, or
other orderable objects.
param_grid : dict or sklearn.grid_search.ParameterGrid
Parameter grid to specify models to fit. See
sklearn.grid_search.ParameterGrid for an explanation
n_jobs : int, optional
Number of jobs to run in parallel using joblib.Parallel
Returns
-------
models : list
List of models fit to the data according to
param_grid
"""
if isinstance(param_grid, dict):
param_grid = ParameterGrid(param_grid)
elif not isinstance(param_grid, ParameterGrid):
raise ValueError("param_grid must be a dict or ParamaterGrid instance")
# iterable with (model, sequence) as items
iter_args = ((clone(model).set_params(**params), sequences)
for params in param_grid)
models = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_param_sweep_helper)(args) for args in iter_args)
return models
def _param_sweep_helper(args):
"""
helper for fitting many models on some data
"""
model, sequences = args
model.fit(sequences)
return model
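# ----------------------------------------------------------------------------
# Editor's note: a hedged usage sketch (not part of the original module). It
# assumes msmbuilder's MarkovStateModel is importable and accepts a
# ``lag_time`` parameter; any estimator exposing ``fit(sequences)`` and
# ``set_params`` would work in its place.
if __name__ == "__main__":
    import numpy as np
    from msmbuilder.msm import MarkovStateModel

    # two short fake trajectories of integer state labels
    sequences = [np.random.randint(0, 3, size=100) for _ in range(2)]
    models = param_sweep(MarkovStateModel(), sequences,
                         {'lag_time': [1, 2, 5]}, n_jobs=1)
    print([m.lag_time for m in models])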
|
lgpl-2.1
|
kekraft/golden_eye
|
src/pong_system/scripts/pong_system.py
|
1
|
32887
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('pong_system')
import rospy
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
import copy
import sys
import time
# from Tkinter import *
import Tkinter as tk
import Image as PilImage
import ImageTk  # Requires python-imaging-tk: sudo apt-get install python-imaging-tk
# IS THERE A WAY TO FORCE TARGETING?
# HOW DO WE STEP IN MANUALLY TO DO TARGETING?
from std_msgs.msg import String
from std_msgs.msg import Bool
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
# import vision helper functions
import os
directory = os.path.realpath(__file__)
# print directory
vision_direct = os.path.join(directory, '../../../pong_vision/src')
vision_direct = os.path.abspath(vision_direct)
# print vision_direct
sys.path.append(vision_direct)
from vision_helper import *
from image_pipeline import *
# path debugging
# try:
# user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
# for path in user_paths:
# print path
# print
# except KeyError:
# user_paths = []
class Pong_System:
def __init__(self, on_offense = False):
self.game_state = Game_State.SETUP
self.targeted = False
self.manual = True
self.img = None
######## Do calibration routine here ###########
self.init_calibration()
# init motors
self.motor_a = Motor("motor_a")
self.motor_b = Motor("motor_b")
self.motor_c = Motor("motor_c")
self.motor_a.pwm = 0
self.motor_b.pwm = 0
self.motor_c.pwm = 0
self.motor_a.speed = 0
self.motor_b.speed = 0
self.motor_c.speed = 0
# Subscribers to each subsystem state
self.game_state_sub = rospy.Subscriber('/game_side/state', String, self.state_cb)
self.launcher_state_sub = rospy.Subscriber('/launcher/state', String, self.state_cb)
self.loader_state_sub = rospy.Subscriber('/loader/state', String, self.state_cb)
self.vision_state_sub = rospy.Subscriber('/vision/state', String, self.state_cb)
# subscribe to visions output and raw feed (for the gui)
self.cup_loc_sub = rospy.Subscriber('/vision/cup_location', Vector3, self.cup_loc_cb)
self.image_feed_sub = rospy.Subscriber('/image_raw', Image, self.image_cb)
# Publishers for each topic that the subsystems listen to
# Loader cmd (True = Load, False = Do nothing)
self.loader_pub = rospy.Publisher('/loader/load_cmd', Bool, queue_size=10)
self.dryer_pub = rospy.Publisher('/dryer/cmd', Bool, queue_size=10)
# launcher velocity and pid commands
# Vector3.x = motor a speed, Vector3.y = motor b speed, Vector3.z = motor c speed
self.launcher_motor_vel_pub = rospy.Publisher('/launcher/motor_vel', Vector3, queue_size=10)
# Vector3.x = kp, Vector3.y = ki, Vector3.z = kd
self.launcher_pid_pub = rospy.Publisher('/launcher/pid_val', Vector3, queue_size=10)
# subscribe to speed output of the launcher
self.launcher_speed_sub = rospy.Subscriber('/launcher/speed', Vector3, self.speed_cb)
# subscribe to game state
# a true message sent to this topic means we are on offense and should shoot
# a false message sent to this topic means we are on defense and should do targeting
self.game_offensive_sub = rospy.Subscriber('/game_side/offense', Bool, self.game_side_cb)
######## Do inital targetting ##################
######### Set game state #######################
if on_offense:
self.game_state = Game_State.OFFENSE
else:
self.game_state = Game_State.DEFENSE
        ## Make sure motors start at correct speeds
self.update_motor_speed(self.motor_a.pwm, self.motor_b.pwm, self.motor_c.pwm)
def state_cb(self, msg):
# print out the data
print msg.data
# log the data
rospy.loginfo(msg)
def cup_loc_cb(self, msg):
''' This is called whenever a new cup position is given by the vision system.
What happens here is the pixel coords are translated to world coordinates
and that is transformed to a motor velocity. Each motor velocity is set
appropriately
Only updates if the system is in automatic mode
'''
if not self.manual:
            self.cup_loc = (msg.x, msg.y, msg.z)
            x = msg.x
            y = msg.y
            z = msg.z
# transform pixel cup location to world location
# transform world location to motor velocity
# set this to be the pixel we target
self.targeted = True
def game_side_cb(self, msg):
''' Reading True means we are on offense.
If on offense, load a ball
Reading False means we are on defense.
If on defense, go ahead and set the motors to
spin at the targeted cup
'''
print "Recieved game state message: on offense? ", msg.data
if (msg.data == True) and (self.game_state is not Game_State.SETUP):
rospy.loginfo("On offense.")
if self.targeted is False:
# force to target the cups
pass
# motors' velocities should already be set to hit targeted cup
# load ball
self.load()
self.targeted = False
self.game_state = Game_State.OFFENSE
else:
# target and start motors
if not self.targeted:
                # force targeting
pass
# command motors based on target
cmd = Vector3()
cmd.x = self.motor_a.vel
cmd.y = self.motor_b.vel
cmd.z = self.motor_c.vel
self.launcher_motor_vel_pub.publish(cmd)
self.game_state = Game_State.DEFENSE
def init_calibration(self):
''' Initial calibration consists of getting the vision
routine up and running.
We could also input whether or not we are on offense or defense.
'''
# add the correct vision stuff to our path
print "do calibration"
print "are we on offense or defense?"
def load(self):
rospy.loginfo("Sending load cmd")
cmd = Bool()
cmd.data = True
self.loader_pub.publish(cmd)
# might need to delay here
print "letting screw turn for 1 second"
time.sleep(2)
# turn dryer on
self.dryer_pub.publish(cmd)
print "Turning dryer on for 3 seconds"
time.sleep(6)
# turn dryer off
cmd.data = False
self.dryer_pub.publish(cmd)
def update_motor_speed(self, motor_a_speed, motor_b_speed, motor_c_speed):
# rospy.loginfo("Updating motor speeds")
msg = 'Updating Motor Speed values to {0}, {1}, {2}.'.format(motor_a_speed, motor_b_speed, motor_c_speed)
rospy.loginfo(msg)
cmd = Vector3()
cmd.x = motor_a_speed
cmd.y = motor_b_speed
cmd.z = motor_c_speed
self.launcher_motor_vel_pub.publish(cmd)
def speed_cb(self, msg):
self.motor_a.speed = msg.x
self.motor_b.speed = msg.y
self.motor_c.speed = msg.z
def update_pid_values(self, kp, ki, kd):
msg = 'Updating PID values to {0}, {1}, {2}.'.format(kp, ki, kd)
rospy.loginfo(msg)
cmd = Vector3()
cmd.x = kp
cmd.y = ki
cmd.z = kd
self.launcher_pid_pub.publish(cmd)
def image_cb(self, img_msg):
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(img_msg, "bgr8")
# cv2.imshow('Live Feed: Raw Image', cv_image)
# cv2.waitKey(1)
self.img = cv_image
def target_pixel_location(self, x, y):
print "Set up method to target pixel (x,y)"
def shutdown(self, msg):
rospy.signal_shutdown(msg)
def calibrate_image_pipeline(self):
# make sure there is an image...wait here if there isn't one
        while self.img is None:
print "No image yet"
pass
# instantiate pipeline object
self.ip = Image_Pipeline()
self.ip.calibrate(self.img)
def target_and_show(self):
# make sure there is an image...wait here if there isn't one
        while self.img is None:
print "No image yet"
pass
# run image pipeline object
self.ip.run_pipeline(self.img)
def run_dryer(self, run_it=True):
cmd = Bool()
if run_it == True:
cmd.data = True
else:
cmd.data = False
"Updating dryer"
self.dryer_pub.publish(cmd)
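# ----------------------------------------------------------------------------
# Editor's note: a hedged sketch (not part of the original node) of how the
# offense/defense protocol handled by game_side_cb above is driven from the
# other side. The topic name and message type are taken from the subscription
# set up in Pong_System.__init__; everything else is an illustrative
# assumption.
#
#     import rospy
#     from std_msgs.msg import Bool
#
#     pub = rospy.Publisher('/game_side/offense', Bool, queue_size=1)
#     pub.publish(Bool(data=True))   # on offense: system loads and fires
#     pub.publish(Bool(data=False))  # on defense: system re-targets the cups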
class Motor:
name = "none"
speed = 0.0
pwm = 0
p = 0.0
i = 0.0
d = 0.0
def __init__(self, name, vel=0.0, p=0.0, i=0.0, d=0.0):
self.name = name
self.vel = vel
self.p = p
self.i = i
self.d = d
class Game_State:
SETUP = 0
OFFENSE = 1
DEFENSE = 2
class Application(tk.Frame):
on_offense = False
selected = False
def set_defense(self):
print "On defense..."
self.selected = True
self.on_offense = False
# self.destroy()
# self.master.destroy()
# self.master.quit()
def set_offense(self):
print "On offense..."
self.selected = True
self.on_offense = True
# self.destroy()
# self.master.destroy()
# self.master.quit()
def createWidgets(self):
# self.QUIT = Button(self)
# self.QUIT["text"] = "QUIT"
# self.QUIT["fg"] = "red"
# self.QUIT["command"] = self.quit
# self.QUIT.pack({"side": "left"})
self.offense = tk.Button(self)
self.offense["text"] = "Offense",
self.offense["command"] = self.set_offense
self.offense.pack({"side": "left"})
self.defense = tk.Button(self)
self.defense["text"] = "Defense"
self.defense["command"] = self.set_defense
self.defense.pack({"side": "left"})
def __init__(self, master=None):
tk.Frame.__init__(self, master)
master.minsize(width=250, height=250)
self.pack()
self.createWidgets()
class Select_Side():
on_offense = False
selected = False
root = None
def __init__(self):
# Creates the window and waits for a selection,
        # returns selection after window is destroyed
self.create_window()
print "Selected = ", self.selected
print "On Offense = ", self.on_offense
def create_window(self):
self.root = tk.Tk()
self.root.protocol("WM_DELETE_WINDOW", self.on_closing)
self.root.app = Application(master=self.root)
self.root.app.mainloop()
def on_closing(self):
print "destroying"
self.selected = self.root.app.selected
self.on_offense = self.root.app.on_offense
self.root.destroy()
#import sift
class System_GUI():
def __init__(self, img, pong_system):
self.manual_mode = True
self.game_state = Game_State.SETUP
self.dryer_on = False
# vision helper class
self.vision_helper = Vision_Helper()
self.root = tk.Tk()
self.root.resizable(0,0)
self.root.wm_title("GoldenEye v0.1")
# tl = Tkinter.Toplevel(root)
# cv2.namedWindow("Display Window", cv2.WINDOW_AUTOSIZE)
# cv2.imshow("Display Window", img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
self.pong_system = pong_system
im = PilImage.fromarray(img)
self.imgtk = ImageTk.PhotoImage(image=im)
# insert image into panel
self.img_panel = tk.Label(self.root, image = self.imgtk)
self.img_panel.pack(side = "top", fill = "both", expand = "no")
self.img_panel.bind("<Button-1>", self.img_clicked_button)
# output position relative to clicks
self.click_panel = tk.Frame(self.root)
self.click_panel.pack()
self.pixel_label = tk.Label(self.click_panel)
self.pixel_label.grid(row=0,column=0)
self.pixel_label.configure(text = "Pixel")
self.pixel_value_label = tk.Label(self.click_panel)
self.pixel_value_label.grid(row=0,column=1)
self.dist_label = tk.Label(self.click_panel)
self.dist_label.grid(row=0,column=2)
self.dist_label.configure(text = "Distance")
self.dist_value_label = tk.Label(self.click_panel)
self.dist_value_label.grid(row=0,column=3)
self.lat_label = tk.Label(self.click_panel)
self.lat_label.grid(row=0,column=4)
self.lat_label.configure(text = "Lateral")
self.lat_value_label = tk.Label(self.click_panel)
self.lat_value_label.grid(row=0,column=5)
# Game State Text....need method for updating this to offense and defense
self.game_state_panel = tk.Frame(self.root)
self.game_state_panel.pack(pady = 10)
self.game_setup_text = tk.Text(self.game_state_panel, height=1, width = 8)
self.game_setup_text.config(bg='green2')
self.game_setup_text.config(fg='black')
self.game_setup_text.grid(row=0,column=0)
self.game_setup_text.insert(tk.END, " Setup")
self.game_setup_text.config(state=tk.DISABLED)
self.game_setup_text.bind('<Button-1>', self.setup_state)
self.game_offense_text = tk.Text(self.game_state_panel, height=1, width = 10)
self.game_offense_text.config(bg='gray77')
self.game_offense_text.config(fg='black')
self.game_offense_text.grid(row=0,column=1)
self.game_offense_text.insert(tk.END, " Offense")
self.game_offense_text.config(state=tk.DISABLED)
self.game_offense_text.bind('<Button-1>', self.offense_state)
self.game_defense_text = tk.Text(self.game_state_panel, height=1, width = 10)
self.game_defense_text.config(bg='gray77')
self.game_defense_text.config(fg='black')
self.game_defense_text.grid(row=0, column=2)
self.game_defense_text.insert(tk.END, " Defense")
self.game_defense_text.config(state=tk.DISABLED)
self.game_defense_text.bind('<Button-1>', self.defense_state)
####### Panel to control distance of frame and table ########
self.vision_panel = tk.Frame(self.root)
self.vision_panel.pack(pady=10)
self.dist_at_top_label = tk.Label(self.vision_panel)
self.dist_at_top_label.grid(row=0, column=0)
self.dist_at_top_label.configure(text = "Top Distance")
self.dist_at_bot_label = tk.Label(self.vision_panel)
self.dist_at_bot_label.grid(row=0, column=1)
self.dist_at_bot_label.configure(text = "Bottom Distance")
self.table_width_label = tk.Label(self.vision_panel)
self.table_width_label.grid(row=0, column=2)
self.table_width_label.configure(text = "Table Width")
self.dist_at_top_entry = tk.Spinbox(self.vision_panel, from_=0, to=10, increment=1)
self.dist_at_top_entry.grid(row=1, column=0)
self.dist_at_top_entry.config(width = 18)
self.dist_at_bot_entry = tk.Spinbox(self.vision_panel, from_=0, to=10, increment=1)
self.dist_at_bot_entry.grid(row=1, column=1)
self.dist_at_bot_entry.config(width = 18)
self.table_width_entry = tk.Spinbox(self.vision_panel, from_=0, to=10, increment=1)
self.table_width_entry.grid(row=1, column=2)
self.table_width_entry.config(width = 18)
##### Text entries for current motor speeds #######
self.cur_speed_panel = tk.Frame(self.root)
self.cur_speed_panel.pack(pady=10)
self.cur_speed_a_text = tk.Text(self.cur_speed_panel, height=1, width = 8)
self.cur_speed_a_text.grid(row=0, column=0)
self.cur_speed_a_text.insert(tk.END, " A: Cur")
self.cur_speed_a_text.config(state=tk.DISABLED)
self.cur_speed_b_text = tk.Text(self.cur_speed_panel, height=1, width = 8)
self.cur_speed_b_text.grid(row=0, column=1)
self.cur_speed_b_text.insert(tk.END, " B: Cur")
self.cur_speed_b_text.config(state=tk.DISABLED)
self.cur_speed_c_text = tk.Text(self.cur_speed_panel, height=1, width = 8)
self.cur_speed_c_text.grid(row=0, column=2)
self.cur_speed_c_text.insert(tk.END, " C: Cur")
self.cur_speed_c_text.config(state=tk.DISABLED)
self.cur_speed_a_val = tk.Label(self.cur_speed_panel, height=1, width = 8)
self.cur_speed_a_val.grid(row=1, column=0)
# self.cur_speed_a_val.insert(tk.END, "")
self.cur_speed_b_val = tk.Label(self.cur_speed_panel, height=1, width = 8)
self.cur_speed_b_val.grid(row=1, column=1)
# self.cur_speed_b_val.insert(tk.END, "")
self.cur_speed_c_val = tk.Label(self.cur_speed_panel, height=1, width = 8)
self.cur_speed_c_val.grid(row=1, column=2)
# self.cur_speed_c_val.insert(tk.END, "")
# motor spin boxes section
motor_text_frame = tk.Frame(self.root)
motor_text_frame.pack(pady=10)
motor_a_text = tk.Text(motor_text_frame, height=1, width=22)
# motor_a_text.pack(side=tk.LEFT, ipadx=20, padx = 10)
motor_a_text.grid(row=0, column=0)
motor_a_text.insert(tk.END, "Motor A")
motor_a_text.config(state=tk.DISABLED)
motor_a_text.configure(bg='gray77')
# motor_a_text.configure(justify = tk.CENTER)
motor_b_text = tk.Text(motor_text_frame, height=1, width=22)
# motor_b_text.pack(side=tk.LEFT, ipadx=20, padx=100)
motor_b_text.grid(row=0, column=1)
motor_b_text.insert(tk.END, "Motor B")
motor_b_text.config(state=tk.DISABLED)
motor_b_text.configure(bg='gray77')
# motor_b_text.configure(justify = tk.CENTER)
motor_c_text = tk.Text(motor_text_frame, height=1, width=22)
# motor_c_text.pack(side=tk.LEFT, ipadx=20, padx = 10)
motor_c_text.grid(row=0, column=2)
motor_c_text.insert(tk.END, "Motor C")
motor_c_text.config(state=tk.DISABLED)
motor_c_text.configure(bg='gray77')
# motor_c_text.configure(justify = tk.CENTER)
# motor_box_frame = tk.Frame(self.root)
# motor_box_frame.pack()
self.motor_a_velocity_box = tk.Spinbox(motor_text_frame, from_=0, to=255, increment=1)
# self.motor_a_velocity_box.pack(ipadx=5, padx=10, pady=10, side=tk.LEFT)
self.motor_a_velocity_box.grid(row=1, column=0)
self.motor_a_velocity_box.configure(width=18)
self.motor_b_velocity_box = tk.Spinbox(motor_text_frame, from_=0, to=255, increment=1)
# self.motor_b_velocity_box.pack(ipadx=5, padx=10, pady=10, side=tk.LEFT)
self.motor_b_velocity_box.grid(row=1, column=1)
self.motor_b_velocity_box.configure(width=18)
self.motor_c_velocity_box = tk.Spinbox(motor_text_frame, from_=0, to=255, increment=1)
# self.motor_c_velocity_box.pack(ipadx= 5, padx=10, pady=10, side=tk.LEFT)
self.motor_c_velocity_box.grid(row=1, column=2)
self.motor_c_velocity_box.configure(width=18)
# Setup area to control
# forward velocity, top and right spin
relative_motor_control_frame = tk.Frame(self.root)
relative_motor_control_frame.pack(pady=10)
forward_velocity_text = tk.Text(relative_motor_control_frame, height=1, width=22)
forward_velocity_text.grid(row=0, column=0)
forward_velocity_text.insert(tk.END, "Forward Velocity")
forward_velocity_text.config(state=tk.DISABLED)
forward_velocity_text.configure(bg='gray77')
# forward_velocity_text.config(justify=tk.CENTER)
right_spin_text = tk.Text(relative_motor_control_frame, height=1, width=22)
right_spin_text.grid(row=0, column=1)
right_spin_text.insert(tk.END, "Right Spin")
right_spin_text.config(state=tk.DISABLED)
right_spin_text.configure(bg='gray77')
# right_spin_text.config(justify=tk.CENTER)
top_spin_text = tk.Text(relative_motor_control_frame, height=1, width=22)
top_spin_text.insert(tk.END, "Top Spin")
top_spin_text.grid(row=0, column=2)
top_spin_text.config(state=tk.DISABLED)
top_spin_text.configure(bg='gray77')
# top_spin_text.config(justify=tk.CENTER)
# forward velocity, top, and right spin boxes
self.forward_velocity_spinbox = tk.Spinbox(relative_motor_control_frame, from_=0, to=255, increment=1)
self.forward_velocity_spinbox.grid(row=1, column=0)
self.forward_velocity_spinbox.configure(width=18)
self.right_spin_spinbox = tk.Spinbox(relative_motor_control_frame, from_=0, to=255, increment=1)
self.right_spin_spinbox.grid(row=1,column=1)
self.right_spin_spinbox.configure(width=18)
self.top_spin_spinbox = tk.Spinbox(relative_motor_control_frame, from_=0, to=255, increment=1)
self.top_spin_spinbox.grid(row=1,column=2)
self.top_spin_spinbox.configure(width=18)
# PID value boxes
pid_section_frame = tk.Frame(self.root)
pid_section_frame.pack(pady= 20)
pid_text_frame = tk.Frame(pid_section_frame)
pid_text_frame.grid(row=0, column=0)
# pid_text_frame.grid(row=0,column=0)
# text boxes for pid
kp_text = tk.Text(pid_text_frame, height=1, width=8)
# kp_text.pack(side=tk.LEFT)
kp_text.grid(row=0, column=0)
kp_text.insert(tk.END, "P: ")
kp_text.config(state=tk.DISABLED)
kp_text.configure(bg='gray77')
ki_text = tk.Text(pid_text_frame, height=1, width=8)
# ki_text.pack(side=tk.LEFT)
ki_text.grid(row=0, column=1)
ki_text.insert(tk.END, "I: ")
ki_text.config(state=tk.DISABLED)
ki_text.configure(bg='gray77')
kd_text = tk.Text(pid_text_frame, height=1, width=8)
# kd_text.pack(side=tk.LEFT)
kd_text.grid(row=0, column=2)
kd_text.insert(tk.END, "D: ")
kd_text.config(state=tk.DISABLED)
kd_text.configure(bg='gray77')
# frame for pid buttons
pid_button_frame = tk.Frame(pid_section_frame)
# pid_button_frame.pack()
pid_button_frame.grid(row=1, column=0)
self.kp_box = tk.Spinbox(pid_button_frame, from_=0, to=10, increment=.1)
# self.kp_box.pack(side=tk.LEFT)
self.kp_box.grid(row=0, column=0)
self.kp_box.configure(width=5)
self.ki_box = tk.Spinbox(pid_button_frame, from_=0, to=10, increment=.1)
# self.kp_box.pack(side=tk.LEFT)
self.ki_box.grid(row=0, column=1)
self.ki_box.configure(width=5)
self.kd_box = tk.Spinbox(pid_button_frame, from_=0, to=10, increment=.1)
# self.kd_box.pack(side=tk.LEFT)
self.kd_box.grid(row=0, column=2)
self.kd_box.configure(width=5)
# What mode are we in, automatic or manual
self.mode_val = tk.IntVar()
self.manual_button = tk.Radiobutton(self.root, text="Manual", variable=self.mode_val, value=1, command=self.mode_change)
self.manual_button.pack()
self.automatic_button = tk.Radiobutton(self.root, text="Automatic", variable=self.mode_val, value=2, command=self.mode_change)
self.automatic_button.pack()
# insert buttons into panel
# 4 buttons: Fire, update motors, update pid, and Quit
main_button_panel = tk.Frame(self.root)
main_button_panel.pack(pady = 20)
self.fire_button = tk.Button(main_button_panel, text="Fire", width = 20)
self.update_motor_speed_button = tk.Button(main_button_panel, text="Update Motor Values", width = 20)
self.update_pid_button = tk.Button(main_button_panel, text="Update PID", width = 20)
self.quit_button = tk.Button(main_button_panel, text="Quit", width = 20)
# self.fire_button.pack(fill=tk.X, side=tk.TOP)
# self.update_motor_speed_button.pack(fill=tk.X, side=tk.BOTTOM)
# self.update_pid_button.pack(fill=tk.X, side=tk.BOTTOM)
# self.quit_button.pack(fill=tk.X, side=tk.TOP)
self.fire_button.grid(row=1,column=1, padx =5, pady = 5)
self.update_motor_speed_button.grid(row=0,column=1, padx=5, pady = 5)
self.update_pid_button.grid(row=0,column=0, padx =5, pady = 5)
self.quit_button.grid(row=1, column=0, padx =5, pady = 5)
self.fire_button.bind('<Button-1>', self.fire)
self.update_motor_speed_button.bind('<Button-1>', self.update_motor_speed)
self.update_pid_button.bind('<Button-1>', self.update_pid_values)
self.quit_button.bind('<Button-1>', self.quit)
self.root.bind("<Return>", self.fire)
self.root.bind("p", self.update_img)
self.root.bind("c", self.calibrate)
self.root.bind("t", self.target)
self.root.bind("s", self.save_img)
self.root.bind("d", self.alt_dryer)
# # display updated picture occasionally
self.root.after(100, self.update_img)
# # display updated speed occasionally
self.root.after(100, self.update_speeds_label)
        # # display game state occasionally
self.root.after(100, self.update_game_state_label)
def calibrate(self, arg):
## most of this method should really reside in pong system
# camera calibration
# select four corners of image to do translation points
corner_calibrate = Calibrate_Pixel_2_World()
img = self.pong_system.img
        if img is not None:
corner_calibrate.start_calibration(img)
self.vision_helper.top_cup_left_row = corner_calibrate.top_cup_left_row
self.vision_helper.top_cup_left_col = corner_calibrate.top_cup_left_col
self.vision_helper.top_cup_right_row = corner_calibrate.top_cup_right_row
self.vision_helper.top_cup_right_col = corner_calibrate.top_cup_right_col
self.vision_helper.bot_cup_left_row = corner_calibrate.bot_cup_left_row
self.vision_helper.bot_cup_left_col = corner_calibrate.bot_cup_left_col
self.vision_helper.bot_cup_right_row = corner_calibrate.bot_cup_right_row
self.vision_helper.bot_cup_right_col = corner_calibrate.bot_cup_right_col
## now do image pipeline calibration
self.pong_system.calibrate_image_pipeline()
def start_gui(self):
# update image
self.update_img()
# select manual mode
self.manual_button.select()
# start the GUI
self.root.mainloop()
def fire(self, arg):
''' To really tie this in, we need to be able to publish the motor commands'''
print 'Fire'
# motor_a_speed = float(self.motor_a_velocity_box.get())
# print 'Motor a Speed', motor_a_speed
self.pong_system.load()
def update_pid_values(self, arg):
# get values from spin boxes
kp = float(self.kp_box.get())
ki = float(self.ki_box.get())
kd = float(self.kd_box.get())
# send ros command to controller
self.pong_system.update_pid_values(kp, ki, kd)
def update_motor_speed(self, arg):
# get values from spin boxes
motor_a_speed = float(self.motor_a_velocity_box.get())
motor_b_speed = float(self.motor_b_velocity_box.get())
motor_c_speed = float(self.motor_c_velocity_box.get())
# Send ros command to controller
self.pong_system.update_motor_speed(motor_a_speed, motor_b_speed, motor_c_speed)
def quit(self, arg):
self.pong_system.update_motor_speed(0, 0, 0)
print 'Quit'
time.sleep(.5)
self.pong_system.shutdown("Quit pressed in GUI")
self.root.destroy()
def mode_change(self):
# print 'Selection changed to: ', self.mode_val.get()
if self.mode_val.get() == 1:
self.manual_mode = True
self.pong_system.manual = True
else:
self.manual_mode = False
self.pong_system.manual = False
def update_img(self, arg=None):
# get image from pong system class
img = self.pong_system.img
if img is not None:
# convert image to be friendly with tkinter
# rearrange color channel
# print arg
# self.root.after(4000, self.update_img)
b,g,r = cv2.split(img)
img = cv2.merge((r,g,b))
im = PilImage.fromarray(img)
self.imgtk = ImageTk.PhotoImage(image=im)
self.img_panel.configure(image=self.imgtk)
def update_speeds_label(self, arg=None):
# get speeds from motors
motor_a_speed = self.pong_system.motor_a.speed
motor_b_speed = self.pong_system.motor_b.speed
motor_c_speed = self.pong_system.motor_c.speed
motor_a_speed_str = '{0:.2f}'.format(motor_a_speed)
motor_b_speed_str = '{0:.2f}'.format(motor_b_speed)
motor_c_speed_str = '{0:.2f}'.format(motor_c_speed)
self.cur_speed_a_val.configure(text = motor_a_speed_str)
        self.cur_speed_b_val.configure(text = motor_b_speed_str)
        self.cur_speed_c_val.configure(text = motor_c_speed_str)
# # display updated speed occasionally
self.root.after(100, self.update_speeds_label)
def update_game_state_label(self, arg=None):
self.game_state = self.pong_system.game_state
# print "Game State"
# print self.game_state
if self.game_state == Game_State.OFFENSE:
self.offense_state()
elif self.game_state == Game_State.DEFENSE:
self.defense_state()
else:
self.setup_state()
self.root.after(100, self.update_game_state_label)
def img_clicked_button(self, arg):
''' If the system is in manual mode, then take the click and
        aim for the selected area. Otherwise, ignore the click.
'''
row = arg.y
col = arg.x
if self.manual_mode:
# Turn the canvas click points into points that are relative to the image location
# print "im size", self.imgtk.
canvas_width = self.img_panel.winfo_width()
canvas_height = self.img_panel.winfo_height()
# img_width, img_height = self.pong_system.img.shape
img_width = self.imgtk.width()
img_height = self.imgtk.height()
# canvas width minus image width
x_offset = (canvas_width - img_width) / 2
y_offset = (canvas_height - img_height) / 2
# click relative to img pixel
x_img_pixel = arg.x - x_offset
y_img_pixel = arg.y - y_offset
# print "panel clicked"
# print '({0},{1})'.format(arg.x, arg.y)
# print "canvas width: ", canvas_width
# print "canvas height: ", canvas_height
# print "Image width: ", img_width
# print "Image height: ", img_height
# print "x offset: ", x_offset
# print "y offset: ", y_offset
# print "X pixel on image: ", x_img_pixel
# print "Y pixel on image: ", y_img_pixel
# print
# print
# feed the information to the pong system
# output info to gui
# pixel
pixel_value_string = '({0}, {1}) '.format(row, col)
self.pixel_value_label.configure(text = pixel_value_string)
# get lateral and dist value
dist, lateral = self.vision_helper.calc_position_from_pixel(row, col)
self.lat_value_label.configure(text = str(lateral))
self.dist_value_label.configure(text = str(dist))
def setup_state(self, arg=None):
# print " setup "
self.game_setup_text.config(bg='green2')
self.game_offense_text.config(bg='gray77')
self.game_defense_text.config(bg='gray77')
self.game_state = Game_State.SETUP
def offense_state(self, arg=None):
# print "offense "
self.game_setup_text.config(bg='gray77')
self.game_offense_text.config(bg='green2')
self.game_defense_text.config(bg='gray77')
self.game_state = Game_State.OFFENSE
# if self.manual_mode != True:
# it's time to switch modes
# enable the fire button
self.fire_button.config(state=tk.NORMAL)
def defense_state(self, arg=None):
# print "defense"
self.game_setup_text.config(bg='gray77')
self.game_offense_text.config(bg='gray77')
self.game_defense_text.config(bg='green2')
self.game_state = Game_State.DEFENSE
# disable the fire button
self.fire_button.config(state=tk.DISABLED)
def target(self, arg=None):
        # display the automatic targeting sequence
self.pong_system.target_and_show()
def save_img(self, arg=None):
# save the current cameras image to disk
# print "Saving image"
directory = os.path.realpath(__file__)
# print directory
img_path = os.path.join(directory, '../pong_system_img.jpg')
img_path = os.path.abspath(img_path)
img = self.pong_system.img
cv2.imwrite(img_path, img)
# print "image saved"
def alt_dryer(self, arg=None):
print "alternating dryer"
self.pong_system.run_dryer(self.dryer_on)
self.dryer_on = not self.dryer_on
def main():
# Have the user dictate whether or not they are on offense
#side = Select_Side()
#selected = side.selected
#assert(selected)
#on_offense = side.on_offense
rospy.init_node('pong_system')
# pong = Pong_System(on_offense)
pong = Pong_System(on_offense=True)
start_system_gui(pong)
# not needed because Tkinter is already doing a loop called main loop
# could loop here endlessly and call it root.update
# see http://stackoverflow.com/questions/459083/how-do-you-run-your-own-code-alongside-tkinters-event-loop
# rospy.spin()
def start_system_gui(pong_system):
img = cv2.imread("saved.jpg", 1)
b,g,r = cv2.split(img)
img = cv2.merge((r,g,b))
gui = System_GUI(img, pong_system)
gui.start_gui()
if __name__ == '__main__':
main()
|
bsd-2-clause
|
grollins/calm
|
calm/test_data/plot_kmeans_output.py
|
1
|
1401
|
import pandas
import pylab
import matplotlib.pyplot as plt
from calm.pandas_time_series import PandasTimeSeries
s1 = PandasTimeSeries()
s2 = PandasTimeSeries()
s1.load_csv('noisy_simple_3state.csv')
s2.load_csv('output_from_kmeans_test.csv')
clust0_inds = s2.get_indices_where_lt(1.0)
clust1_inds = s2.get_indices_where_geq(1.0)
pylab.plot(s1.series.index, s1.series, 'k', lw=2)
pylab.plot(s1.series.index[clust0_inds], s1.series[clust0_inds], 'bo', ms=5)
pylab.plot(s1.series.index[clust1_inds], s1.series[clust1_inds], 'ro', ms=5)
pylab.show()
pylab.clf()
pylab.plot(s2.series.index, s2.series, 'k', lw=2)
pylab.plot(s2.series.index[clust0_inds], s2.series[clust0_inds], 'bo', ms=5)
pylab.plot(s2.series.index[clust1_inds], s2.series[clust1_inds], 'ro', ms=5)
pylab.ylim(-0.2, 1.2)
pylab.show()
pylab.clf()
s1 = PandasTimeSeries()
s2 = PandasTimeSeries()
s1.load_csv('spo_data.csv')
s2.load_csv('spo_output_from_kmeans_test.csv')
clust0_inds = s2.get_indices_where_lt(1.0)
clust1_inds = s2.get_indices_where_geq(1.0)
pylab.plot(s1.series.index[clust0_inds], s1.series[clust0_inds], 'ko', ms=3)
pylab.plot(s1.series.index[clust1_inds], s1.series[clust1_inds], 'ro', ms=3)
pylab.show()
pylab.clf()
pylab.plot(s2.series.index[clust0_inds], s2.series[clust0_inds], 'ko', ms=3)
pylab.plot(s2.series.index[clust1_inds], s2.series[clust1_inds], 'ro', ms=3)
pylab.ylim(-0.2, 1.2)
pylab.show()
pylab.clf()
|
bsd-2-clause
|
harshaneelhg/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
244
|
7588
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array has shape n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
bsd-3-clause
|
Ized06/GID_Internal
|
client/examples/ipynb_util.py
|
3
|
1742
|
import os, sys, time, re
import numpy as np
import matplotlib.pyplot as plt
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print '[%s]' % self.name,
print 'Elapsed: %s' % (time.time() - self.tstart)
def plot_object_color(object_list, color_mapping):
N = len(object_list)
object_id = 1
for object_name in object_list:
color = color_mapping[object_name]
plt.subplot(1,N,object_id)
plot_color(color, object_name)
object_id += 1
def generate_objectcatetory_json(scene_objects):
# Use http://www.jsoneditoronline.org/ to clean the json
# http://jsonformat.com/#jsondataurllabel
''' Get object category from object name, with some manual editing '''
print '{'
for obj in scene_objects:
objtype = obj.replace('SM_', '').split('_')[0].replace('BookLP', 'Book').replace('Wire1', 'Wire')
print ' ', repr(obj), ':', repr(objtype), ','
print '}'
def check_coverage(dic_instance_mask):
''' Check the portion of labeled image '''
marked_region = None
for object_name in dic_instance_mask.keys():
instance_mask = dic_instance_mask[object_name]
if marked_region is None:
marked_region = np.zeros(instance_mask.shape[0:2])
marked_region += instance_mask
    if marked_region.max() > 1:
        print 'There are invalid regions in the labeling'
    assert(marked_region.max() == 1)
coverage = float(marked_region.sum()) / (marked_region.shape[0] * marked_region.shape[1])
print 'Coverage %.2f' % coverage
return marked_region
|
mit
|
mojaie/kiwiii-server
|
kiwiii/stats/graphstats.py
|
1
|
9870
|
#
# (C) 2014-2017 Seiji Matsuoka
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from collections import Counter
import math
import networkx as nx
import pandas as pd
import community
from kiwiii.stats import graphgen
def graph_stats(G, part_label="partition"):
""" Calculate network and clustering properties
Args:
G: networkx.Graph
Returns:
dict of parameters(clustering coefficient, assortativity
maximum modularity, isolated node ratio, precision,
recall, F-measure)
"""
partition = {n: node[part_label] for n, node in G.nodes_iter(data=True)}
# network properties
cc = nx.average_clustering(G)
tr = nx.transitivity(G)
apls = 0
for g in nx.connected_component_subgraphs(G):
for node in g:
path_length = nx.single_source_dijkstra_path_length(g, node)
apls += sum(path_length.values())
apl = apls / (len(G) * (len(G) - 1))
if G.number_of_edges() > 3 and nx.density(G) < 1:
assort = nx.degree_assortativity_coefficient(G)
else:
assort = None
noniso = 1 - (sum(1 for _, d in G.degree_iter() if not d) / len(G))
if G.number_of_edges():
modu = community.modularity(partition, G)
else:
modu = None
# Precision, recall and F-measure
inner_e = 0
for u, v in G.edges_iter():
if G.node[u][part_label] == G.node[v][part_label]:
inner_e += 1
cluster_cnt = Counter(partition.values())
inner_pe = sum([v * (v - 1) / 2 for v in cluster_cnt.values()])
try:
prec = inner_e / G.number_of_edges()
except ZeroDivisionError:
prec = None
try:
recall = inner_e / inner_pe
except ZeroDivisionError:
recall = None
if prec is not None and recall is not None:
fm = 2 * prec * recall / (prec + recall)
else:
fm = None
return {
"cc": cc,
"transit": tr,
"assort": assort,
"mod": modu,
"noniso": noniso,
"prec": prec,
"recall": recall,
"fm": fm,
"apl": apl
}
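# ----------------------------------------------------------------------------
# Editor's note: a minimal usage sketch of graph_stats (not part of the
# original module). It assumes the networkx 1.x API (nodes_iter/degree_iter)
# that this module is written against, plus the python-louvain ``community``
# package, and that every node carries a ``partition`` attribute:
#
#     H = nx.Graph()
#     H.add_edges_from([(0, 1), (1, 2), (3, 4)])
#     H.add_node(5)  # one isolated node
#     for n in H.nodes():
#         H.node[n]["partition"] = 0 if n < 3 else 1
#     print(graph_stats(H))  # cc, modularity, precision/recall/F-measure, ...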
def random_graph_stats(size, density, nrand=5, cache=False):
ress = []
run = None
for i in range(nrand):
if cache:
run = i + 1
H = graphgen.random_graph(size, density, run)
stats = graph_stats(H)
ress.append(stats)
dt = pd.DataFrame(ress)
rnd = {
"cc": dt["cc"].median(),
"transit": dt["transit"].median(),
"assort": dt["assort"].median(),
"mod": dt["mod"].median(),
"noniso": dt["noniso"].median(),
"prec": dt["prec"].median(),
"recall": dt["recall"].median(),
"fm": dt["fm"].median(),
"apl": dt["apl"].median()
}
for k, v in rnd.items(): # float("nan") -> None
if math.isnan(v):
rnd[k] = None
return rnd
def entropy(G, label, weight_label=None, cutoff=0):
""" Returns information entropy of the cluster
Args:
G: graph
        label: node attribute used to group nodes into
            records [{class: name, members: [list]}, ]
        weight_label: if specified, entropy contribution is reduced
            by the weight factor.
        cutoff: class size cutoff passed to graphgen.group_records
"""
records = graphgen.group_records(G, label, cutoff)
wl = weight_label
h = 0
for rcd in records:
size = len(rcd["members"])
if not size:
continue
p = size / G.number_of_nodes()
if wl is not None:
w = sum(G.node[m][wl] for m in rcd["members"]) / size
plogp = p * math.log2(p) * w
else:
plogp = p * math.log2(p)
h -= plogp
return h, records
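# ----------------------------------------------------------------------------
# Editor's note: a worked example of the entropy computed above (not part of
# the original module). For a 4-node graph whose ``label`` groups have sizes
# 3 and 1 (and no weight_label), the returned entropy is
#     h = -(3/4 * log2(3/4) + 1/4 * log2(1/4)) ~= 0.311 + 0.500 = 0.811 bits,
# i.e. the Shannon entropy of the cluster size distribution.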
def agreement_stats(G, truth_label, comm_label,
truth_weight=None, truth_cutoff=0):
""" Calculate fuzzy NMI and V-measure
Args:
G: graph
        truth_label: node attribute of the ground truth class
        comm_label: node attribute of the assigned community
        truth_weight: node attribute of the weight factor of the
            ground truth class
        truth_cutoff: minimum truth class size
Returns:
results(dict):
community entropy, truth class entropy,
mutual information and normalized mutual information(NMI),
homogeneity, completeness and V-measure
"""
ec, comm = entropy(G, comm_label)
et, truth = entropy(G, truth_label, truth_weight, truth_cutoff)
N = G.number_of_nodes()
mi = 0 # Mutual information
etc = 0 # Conditional entropy truth/comm
ect = 0 # Conditional entropy comm/truth
for c in comm:
for t in truth:
isec = set(c["members"]) & set(t["members"])
isize = len(isec)
if not isize:
continue
if truth_weight is not None:
w = sum(G.node[m][truth_weight] for m in isec) / isize
else:
w = 1
p = isize * N / (len(c["members"]) * len(t["members"]))
plogp = isize / N * math.log2(p)
mi += plogp * w
tplogp = isize / N * math.log2(isize / len(c["members"]))
cplogp = isize / N * math.log2(isize / len(t["members"]))
etc -= tplogp * w
ect -= cplogp * w
nmi = mi / math.sqrt(et * ec) # Normalized mutual information
homo = 1 - (etc / et) # Homogeneity
comp = 1 - (ect / ec) # Completeness
v = 2 * homo * comp / (homo + comp) # V-measure
return {
"h_comm": ec,
"h_truth": et,
"h_ct": ect,
"h_tc": etc,
"mi": mi,
"nmi": nmi,
"homo": homo,
"comp": comp,
"vm": v
}
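# ----------------------------------------------------------------------------
# Editor's note: worked numbers for the V-measure combination above (not part
# of the original module). With homogeneity 0.8 and completeness 0.6,
#     v = 2 * 0.8 * 0.6 / (0.8 + 0.6) ~= 0.686,
# the harmonic mean of the two scores, so a low value on either side drags
# the combined measure down.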
def agreement_distribution(G, truth_label, comm_label, truth_cutoff=0):
""" Distribution of mutual information
Args:
G: graph
        truth_label: node attribute of the ground truth class
        comm_label: node attribute of the assigned community
truth_cutoff: cutoff of truth class size
Returns:
results(dict): {truth class: mutual information}
"""
ec, comm = entropy(G, comm_label)
et, truth = entropy(G, truth_label, None, truth_cutoff)
N = G.number_of_nodes()
dist = []
for t in truth:
mi = 0
for c in comm:
isec = set(c["members"]) & set(t["members"])
isize = len(isec)
if not isize:
continue
p = isize * N / (len(c["members"]) * len(t["members"]))
plogp = isize / N * math.log2(p)
mi += plogp
dist.append({"class": t["class"], "mi": mi})
return dist
def adjusted_agreement_stats(G, stats, truth_label, comm_label,
truth_weight, truth_cutoff,
nrand=5, cache=False):
""" Calculate Adjusted Mutual Information(AMI)
Args:
G: graph
stats: results dict of agreement_stats
truth_label: node attribute of ground truth class
comm_label: node attribute of assigned community
truth_weight: node attribute of weight factor of ground truth class
truth_cutoff: minimum truth class size
nrand: number of randam graph samples to generate
Returns:
adj(dict): adjusted NMI and adjusted V-measure
rnd(dict): median of random graph properties
"""
ress = []
run = None
for i in range(nrand):
if cache:
run = i + 1
H = graphgen.random_graph(len(G), stats["density"], run)
# assign G attributes to H except "partition"
pt = {n: node["partition"] for n, node in H.nodes_iter(data=True)}
H.node = G.node
for n in H.nodes_iter():
H.node[n]["partition"] = pt[n]
# stats
res = graph_stats(H)
res.update(agreement_stats(
H, truth_label, comm_label, truth_weight, truth_cutoff))
ress.append(res)
dt = pd.DataFrame(ress)
emi = dt["mi"].median()
enmi = emi / math.sqrt(stats["h_comm"] * stats["h_truth"])
ehomo = dt["homo"].median()
ecomp = dt["comp"].median()
evm = 2 * ehomo * ecomp / (ehomo + ecomp)
rnd = {
"cc": dt["cc"].median(),
"transit": dt["transit"].median(),
"assort": dt["assort"].median(),
"mod": dt["mod"].median(),
"noniso": dt["noniso"].median(),
"prec": dt["prec"].median(),
"recall": dt["recall"].median(),
"fm": dt["fm"].median(),
"apl": dt["apl"].median(),
"homo": ehomo,
"comp": ecomp,
"mi": emi,
"nmi": enmi,
"vm": evm
}
for k, v in rnd.items(): # float("nan") -> None
if math.isnan(v):
rnd[k] = None
# Adjusted mutual information
try:
ami = (stats["nmi"] - enmi) / (1 - enmi)
except ZeroDivisionError:
ami = None
# Adjusted V-measure
try:
avm = (stats["vm"] - evm) / (1 - evm)
except ZeroDivisionError:
avm = None
adj = {"ami": ami, "avm": avm}
return adj, rnd
def random_graph_exp_mi(comm, truth, size, weight):
"""Deprecated"""
N = size
total = 0
g = math.lgamma
for c in comm:
for t in truth:
a = len(c["members"])
b = len(t["members"])
start = max([0, a + b - N]) + 1
end = min([a, b]) + 1
e = 0
for nij in range(start, end):
front = nij / N * math.log2(N * nij / a / b)
above = g(a+1)+g(b+1)+g(N-a+1)+g(N-b+1)
below = g(N+1)+g(nij+1)+g(a-nij+1)+g(b-nij+1)+g(N-a-b+nij+1)
exp_weight = sum([weight[n] for n in t["members"]]) / b * nij
e += front * math.exp(above - below) * exp_weight
total += e
return total
def fuzzy_ami_exp(mi, comm, truth, size, weight):
"""Deprecated"""
exp = random_graph_exp_mi(comm, truth, size, weight)
sqrth = math.sqrt(mi["h_comm"] * mi["h_truth"])
ami = (mi["mi"] - exp) / (sqrth - exp)
return {
"exp_mi": exp,
"ami": ami
}
|
mit
|
hassaanm/stock-trading
|
pybrain-pybrain-87c7ac3/examples/rl/valuebased/nfq.py
|
4
|
1841
|
#!/usr/bin/env python
__author__ = 'Thomas Rueckstiess, [email protected]'
from pybrain.rl.environments.cartpole import CartPoleEnvironment, DiscreteBalanceTask, CartPoleRenderer
from pybrain.rl.agents import LearningAgent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.rl.learners.valuebased import NFQ, ActionValueNetwork
from pybrain.rl.explorers import BoltzmannExplorer
from numpy import array, arange, meshgrid, pi, zeros, mean
from matplotlib import pyplot as plt
# switch this to True if you want to see the cart balancing the pole (slower)
render = False
plt.ion()
env = CartPoleEnvironment()
if render:
renderer = CartPoleRenderer()
env.setRenderer(renderer)
renderer.start()
module = ActionValueNetwork(4, 3)
task = DiscreteBalanceTask(env, 100)
learner = NFQ()
learner.explorer.epsilon = 0.4
agent = LearningAgent(module, learner)
testagent = LearningAgent(module, None)
experiment = EpisodicExperiment(task, agent)
def plotPerformance(values, fig):
plt.figure(fig.number)
plt.clf()
plt.plot(values, 'o-')
plt.gcf().canvas.draw()
performance = []
if not render:
pf_fig = plt.figure()
while(True):
# one learning step after one episode of world-interaction
experiment.doEpisodes(1)
agent.learn(1)
# test performance (these real-world experiences are not used for training)
if render:
env.delay = True
experiment.agent = testagent
r = mean([sum(x) for x in experiment.doEpisodes(5)])
env.delay = False
testagent.reset()
experiment.agent = agent
performance.append(r)
if not render:
plotPerformance(performance, pf_fig)
print "reward avg", r
print "explorer epsilon", learner.explorer.epsilon
print "num episodes", agent.history.getNumSequences()
print "update step", len(performance)
|
apache-2.0
|
ishanic/scikit-learn
|
examples/covariance/plot_robust_vs_empirical_covariance.py
|
248
|
6359
|
r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, which has a low error provided
  :math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
  to be good ones. This can be considered a "perfect" MCD estimation,
  so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
    Journal of Computational and Graphical Statistics. December 1, 2005,
    14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
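# Note: with these settings the theoretical MCD breakdown point quoted in the
# docstring is (n_samples - n_features - 1) / 2 = (80 - 5 - 1) / 2 = 37
# outliers, so every contamination level tested below stays within the regime
# where the robust estimate is expected to hold up.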
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
|
bsd-3-clause
|
thunderhoser/GewitterGefahr
|
gewittergefahr/plotting/saliency_plotting.py
|
1
|
17704
|
"""Plots saliency maps."""
import numpy
from matplotlib import pyplot
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.plotting import plotting_utils
DEFAULT_CONTOUR_WIDTH = 2
WIND_NAME = 'wind_m_s01'
WIND_COMPONENT_NAMES = [soundings.U_WIND_NAME, soundings.V_WIND_NAME]
WIND_BARB_LENGTH = 10.
EMPTY_WIND_BARB_RADIUS = 0.2
WIND_SALIENCY_MULTIPLIER = 52.5
FIELD_NAME_TO_LATEX_DICT = {
soundings.SPECIFIC_HUMIDITY_NAME: r'$q_{v}$',
soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME: r'$\theta_{v}$',
soundings.TEMPERATURE_NAME: r'$T$',
soundings.RELATIVE_HUMIDITY_NAME: 'RH',
soundings.U_WIND_NAME: r'$u$',
soundings.V_WIND_NAME: r'$v$',
soundings.PRESSURE_NAME: r'$p$',
WIND_NAME: 'Wind'
}
FIGURE_WIDTH_INCHES = 15
FIGURE_HEIGHT_INCHES = 15
SOUNDING_SALIENCY_BACKGROUND_COLOUR = numpy.array(
[166, 206, 227], dtype=float
) / 255
DEFAULT_MIN_FONT_SIZE = 10.
DEFAULT_MAX_FONT_SIZE = 25.
DEFAULT_MIN_SOUNDING_FONT_SIZE = 24.
DEFAULT_MAX_SOUNDING_FONT_SIZE = 60.
def _saliency_to_colour_and_size(
saliency_matrix, colour_map_object, max_absolute_colour_value,
min_font_size, max_font_size):
"""Returns colour and font size for each saliency value.
:param saliency_matrix: numpy array (any shape) of saliency values.
:param colour_map_object: See doc for `plot_2d_grid`.
:param max_absolute_colour_value: Same.
:param min_font_size: Same.
:param max_font_size: Same.
:return: rgb_matrix: numpy array of colours. If dimensions of
`saliency_matrix` are M x N, this will be M x N x 3. In general, number
of dimensions will increase by 1 and length of last axis will be 3
(corresponding to R, G, and B values).
:return: font_size_matrix: numpy array of font sizes (same shape as
`saliency_matrix`).
"""
error_checking.assert_is_geq(max_absolute_colour_value, 0.)
max_absolute_colour_value = max([max_absolute_colour_value, 0.001])
error_checking.assert_is_greater(min_font_size, 0.)
error_checking.assert_is_greater(max_font_size, min_font_size)
colour_norm_object = pyplot.Normalize(
vmin=0., vmax=max_absolute_colour_value)
rgb_matrix = colour_map_object(colour_norm_object(
numpy.absolute(saliency_matrix)
))[..., :-1]
normalized_saliency_matrix = (
numpy.absolute(saliency_matrix) / max_absolute_colour_value
)
normalized_saliency_matrix[normalized_saliency_matrix > 1.] = 1.
font_size_matrix = (
min_font_size + normalized_saliency_matrix *
(max_font_size - min_font_size)
)
return rgb_matrix, font_size_matrix
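# Minimal usage sketch for the helper above (illustrative names only, not part
# of the original module):
#
#     demo_saliency = numpy.array([[0.1, -0.4], [0.9, 0.0]])
#     demo_rgb, demo_sizes = _saliency_to_colour_and_size(
#         saliency_matrix=demo_saliency,
#         colour_map_object=pyplot.get_cmap('binary'),
#         max_absolute_colour_value=1.,
#         min_font_size=DEFAULT_MIN_FONT_SIZE,
#         max_font_size=DEFAULT_MAX_FONT_SIZE)
#
# `demo_rgb` then has shape 2 x 2 x 3 and `demo_sizes` has shape 2 x 2, with
# larger absolute saliency mapped to darker colours and bigger font sizes.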
def plot_saliency_for_sounding(
saliency_matrix, sounding_field_names, pressure_levels_mb,
colour_map_object, max_absolute_colour_value,
min_font_size=DEFAULT_MIN_SOUNDING_FONT_SIZE,
max_font_size=DEFAULT_MAX_SOUNDING_FONT_SIZE):
"""Plots saliency for one sounding.
P = number of pressure levels
F = number of fields
:param saliency_matrix: P-by-F numpy array of saliency values.
:param sounding_field_names: length-F list of field names.
:param pressure_levels_mb: length-P list of pressure levels (millibars).
:param colour_map_object: See doc for `plot_2d_grid`.
:param max_absolute_colour_value: Same.
:param min_font_size: Same.
:param max_font_size: Same.
"""
error_checking.assert_is_geq(max_absolute_colour_value, 0.)
max_absolute_colour_value = max([max_absolute_colour_value, 0.001])
error_checking.assert_is_greater_numpy_array(pressure_levels_mb, 0.)
error_checking.assert_is_numpy_array(pressure_levels_mb, num_dimensions=1)
error_checking.assert_is_list(sounding_field_names)
error_checking.assert_is_numpy_array(
numpy.array(sounding_field_names), num_dimensions=1)
num_pressure_levels = len(pressure_levels_mb)
num_sounding_fields = len(sounding_field_names)
error_checking.assert_is_numpy_array_without_nan(saliency_matrix)
error_checking.assert_is_numpy_array(
saliency_matrix,
exact_dimensions=numpy.array([num_pressure_levels, num_sounding_fields])
)
try:
u_wind_index = sounding_field_names.index(soundings.U_WIND_NAME)
v_wind_index = sounding_field_names.index(soundings.V_WIND_NAME)
plot_wind_barbs = True
except ValueError:
plot_wind_barbs = False
if plot_wind_barbs:
u_wind_saliency_values = saliency_matrix[:, u_wind_index]
v_wind_saliency_values = saliency_matrix[:, v_wind_index]
wind_saliency_magnitudes = numpy.sqrt(
u_wind_saliency_values ** 2 + v_wind_saliency_values ** 2)
colour_norm_object = pyplot.Normalize(
vmin=0., vmax=max_absolute_colour_value)
rgb_matrix_for_wind = colour_map_object(colour_norm_object(
wind_saliency_magnitudes
))[..., :-1]
non_wind_flags = numpy.array(
[f not in WIND_COMPONENT_NAMES for f in sounding_field_names],
dtype=bool
)
non_wind_indices = numpy.where(non_wind_flags)[0]
saliency_matrix = saliency_matrix[:, non_wind_indices]
sounding_field_names = [
sounding_field_names[k] for k in non_wind_indices
]
sounding_field_names.append(WIND_NAME)
num_sounding_fields = len(sounding_field_names)
rgb_matrix, font_size_matrix = _saliency_to_colour_and_size(
saliency_matrix=saliency_matrix, colour_map_object=colour_map_object,
max_absolute_colour_value=max_absolute_colour_value,
min_font_size=min_font_size, max_font_size=max_font_size)
_, axes_object = pyplot.subplots(
1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
axes_object.set_facecolor(
plotting_utils.colour_from_numpy_to_tuple(
SOUNDING_SALIENCY_BACKGROUND_COLOUR)
)
for k in range(num_sounding_fields):
if sounding_field_names[k] == WIND_NAME:
for j in range(num_pressure_levels):
this_vector = numpy.array([
u_wind_saliency_values[j], v_wind_saliency_values[j]
])
this_vector = (
WIND_SALIENCY_MULTIPLIER * this_vector
/ numpy.linalg.norm(this_vector, ord=2)
)
this_colour_tuple = plotting_utils.colour_from_numpy_to_tuple(
rgb_matrix_for_wind[j, ...]
)
axes_object.barbs(
k, pressure_levels_mb[j], this_vector[0], this_vector[1],
length=WIND_BARB_LENGTH, fill_empty=True, rounding=False,
sizes={'emptybarb': EMPTY_WIND_BARB_RADIUS},
color=this_colour_tuple)
continue
for j in range(num_pressure_levels):
this_colour_tuple = plotting_utils.colour_from_numpy_to_tuple(
rgb_matrix[j, k, ...]
)
if saliency_matrix[j, k] >= 0:
axes_object.text(
k, pressure_levels_mb[j], '+',
fontsize=font_size_matrix[j, k],
color=this_colour_tuple, horizontalalignment='center',
verticalalignment='center')
else:
axes_object.text(
k, pressure_levels_mb[j], '_',
fontsize=font_size_matrix[j, k],
color=this_colour_tuple, horizontalalignment='center',
verticalalignment='bottom')
axes_object.set_xlim(-0.5, num_sounding_fields - 0.5)
axes_object.set_ylim(100, 1000)
axes_object.invert_yaxis()
pyplot.yscale('log')
pyplot.minorticks_off()
y_tick_locations = numpy.linspace(100, 1000, num=10, dtype=int)
y_tick_labels = ['{0:d}'.format(p) for p in y_tick_locations]
pyplot.yticks(y_tick_locations, y_tick_labels)
x_tick_locations = numpy.linspace(
0, num_sounding_fields - 1, num=num_sounding_fields, dtype=float)
x_tick_labels = [
FIELD_NAME_TO_LATEX_DICT[f] for f in sounding_field_names
]
pyplot.xticks(x_tick_locations, x_tick_labels)
colour_bar_object = plotting_utils.plot_linear_colour_bar(
axes_object_or_matrix=axes_object, data_matrix=saliency_matrix,
colour_map_object=colour_map_object, min_value=0.,
max_value=max_absolute_colour_value, orientation_string='vertical',
extend_min=False, extend_max=True)
colour_bar_object.set_label('Absolute saliency')
def plot_2d_grid_with_contours(
saliency_matrix_2d, axes_object, colour_map_object,
max_absolute_contour_level, contour_interval,
line_width=DEFAULT_CONTOUR_WIDTH):
"""Plots 2-D saliency map with line contours.
M = number of rows in spatial grid
N = number of columns in spatial grid
:param saliency_matrix_2d: M-by-N numpy array of saliency values.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
Will plot on these axes.
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param max_absolute_contour_level: Max absolute value to plot. Minimum
value will be `-1 * max_absolute_contour_level`.
:param contour_interval: Interval (in saliency units) between successive
contours.
:param line_width: Width of contour lines.
"""
error_checking.assert_is_geq(max_absolute_contour_level, 0.)
max_absolute_contour_level = max([max_absolute_contour_level, 0.001])
error_checking.assert_is_geq(contour_interval, 0.)
contour_interval = max([contour_interval, 0.0001])
error_checking.assert_is_numpy_array_without_nan(saliency_matrix_2d)
error_checking.assert_is_numpy_array(saliency_matrix_2d, num_dimensions=2)
error_checking.assert_is_less_than(
contour_interval, max_absolute_contour_level)
num_grid_rows = saliency_matrix_2d.shape[0]
num_grid_columns = saliency_matrix_2d.shape[1]
x_coord_spacing = num_grid_columns ** -1
y_coord_spacing = num_grid_rows ** -1
x_coords, y_coords = grids.get_xy_grid_points(
x_min_metres=x_coord_spacing / 2, y_min_metres=y_coord_spacing / 2,
x_spacing_metres=x_coord_spacing, y_spacing_metres=y_coord_spacing,
num_rows=num_grid_rows, num_columns=num_grid_columns)
x_coord_matrix, y_coord_matrix = numpy.meshgrid(x_coords, y_coords)
half_num_contours = int(numpy.round(
1 + max_absolute_contour_level / contour_interval
))
# Plot positive values.
these_contour_levels = numpy.linspace(
0., max_absolute_contour_level, num=half_num_contours)
axes_object.contour(
x_coord_matrix, y_coord_matrix, saliency_matrix_2d,
these_contour_levels, cmap=colour_map_object,
vmin=numpy.min(these_contour_levels),
vmax=numpy.max(these_contour_levels), linewidths=line_width,
linestyles='solid', zorder=1e6, transform=axes_object.transAxes)
# Plot negative values.
these_contour_levels = these_contour_levels[1:]
axes_object.contour(
x_coord_matrix, y_coord_matrix, -saliency_matrix_2d,
these_contour_levels, cmap=colour_map_object,
vmin=numpy.min(these_contour_levels),
vmax=numpy.max(these_contour_levels), linewidths=line_width,
linestyles='dashed', zorder=1e6, transform=axes_object.transAxes)
def plot_many_2d_grids_with_contours(
saliency_matrix_3d, axes_object_matrix, colour_map_object,
max_absolute_contour_level, contour_interval,
line_width=DEFAULT_CONTOUR_WIDTH, row_major=True):
"""Plots 2-D saliency map with line contours for each predictor.
M = number of rows in spatial grid
N = number of columns in spatial grid
P = number of predictors
:param saliency_matrix_3d: M-by-N-by-P numpy array of saliency values.
:param axes_object_matrix: See doc for
`plotting_utils.create_paneled_figure`.
:param colour_map_object: See doc for `plot_2d_grid_with_contours`.
:param max_absolute_contour_level: Same.
:param contour_interval: Same.
:param line_width: Same.
:param row_major: Boolean flag. If True, panels will be filled along rows
first, then down columns. If False, down columns first, then along
rows.
"""
error_checking.assert_is_numpy_array_without_nan(saliency_matrix_3d)
error_checking.assert_is_numpy_array(saliency_matrix_3d, num_dimensions=3)
error_checking.assert_is_boolean(row_major)
if row_major:
order_string = 'C'
else:
order_string = 'F'
num_predictors = saliency_matrix_3d.shape[-1]
num_panel_rows = axes_object_matrix.shape[0]
num_panel_columns = axes_object_matrix.shape[1]
for k in range(num_predictors):
this_panel_row, this_panel_column = numpy.unravel_index(
k, (num_panel_rows, num_panel_columns), order=order_string
)
plot_2d_grid_with_contours(
saliency_matrix_2d=saliency_matrix_3d[..., k],
axes_object=axes_object_matrix[this_panel_row, this_panel_column],
colour_map_object=colour_map_object,
max_absolute_contour_level=max_absolute_contour_level,
contour_interval=contour_interval, line_width=line_width)
def plot_2d_grid_with_pm_signs(
saliency_matrix_2d, axes_object, colour_map_object,
max_absolute_colour_value, min_font_size=DEFAULT_MIN_FONT_SIZE,
max_font_size=DEFAULT_MAX_FONT_SIZE):
"""Plots 2-D saliency map with plus and minus signs ("+" and "-").
M = number of rows in spatial grid
N = number of columns in spatial grid
:param saliency_matrix_2d: See doc for `plot_2d_grid_with_contours`.
:param axes_object: Same.
:param colour_map_object: Same.
:param max_absolute_colour_value: Same.
:param min_font_size: Minimum font size (used for zero saliency).
:param max_font_size: Max font size (used for max absolute value).
"""
error_checking.assert_is_geq(max_absolute_colour_value, 0.)
max_absolute_colour_value = max([max_absolute_colour_value, 0.001])
error_checking.assert_is_numpy_array_without_nan(saliency_matrix_2d)
error_checking.assert_is_numpy_array(saliency_matrix_2d, num_dimensions=2)
rgb_matrix, font_size_matrix = _saliency_to_colour_and_size(
saliency_matrix=saliency_matrix_2d, colour_map_object=colour_map_object,
max_absolute_colour_value=max_absolute_colour_value,
min_font_size=min_font_size, max_font_size=max_font_size)
num_grid_rows = saliency_matrix_2d.shape[0]
num_grid_columns = saliency_matrix_2d.shape[1]
x_coord_spacing = num_grid_columns ** -1
y_coord_spacing = num_grid_rows ** -1
x_coords, y_coords = grids.get_xy_grid_points(
x_min_metres=x_coord_spacing / 2, y_min_metres=y_coord_spacing / 2,
x_spacing_metres=x_coord_spacing, y_spacing_metres=y_coord_spacing,
num_rows=num_grid_rows, num_columns=num_grid_columns)
for i in range(num_grid_rows):
for j in range(num_grid_columns):
this_colour_tuple = plotting_utils.colour_from_numpy_to_tuple(
rgb_matrix[i, j, ...]
)
            if saliency_matrix_2d[i, j] >= 0:
                axes_object.text(
                    x_coords[j], y_coords[i], '+',
                    fontsize=font_size_matrix[i, j],
                    color=this_colour_tuple, horizontalalignment='center',
                    verticalalignment='center', transform=axes_object.transAxes)
            else:
                axes_object.text(
                    x_coords[j], y_coords[i], '_',
                    fontsize=font_size_matrix[i, j],
                    color=this_colour_tuple, horizontalalignment='center',
                    verticalalignment='bottom', transform=axes_object.transAxes)
def plot_many_2d_grids_with_pm_signs(
saliency_matrix_3d, axes_object_matrix, colour_map_object,
max_absolute_colour_value, min_font_size=DEFAULT_MIN_FONT_SIZE,
max_font_size=DEFAULT_MAX_FONT_SIZE, row_major=True):
"""Plots many 2-D saliency map with plus and minus signs ("+" and "-").
:param saliency_matrix_3d: See doc for `plot_many_2d_grids_with_contours`.
:param axes_object_matrix: Same.
:param colour_map_object: See doc for `plot_2d_grid_with_pm_signs`.
:param max_absolute_colour_value: Same.
:param min_font_size: Same.
:param max_font_size: Same.
:param row_major: See doc for `plot_many_2d_grids_with_contours`.
"""
error_checking.assert_is_numpy_array_without_nan(saliency_matrix_3d)
error_checking.assert_is_numpy_array(saliency_matrix_3d, num_dimensions=3)
error_checking.assert_is_boolean(row_major)
if row_major:
order_string = 'C'
else:
order_string = 'F'
num_predictors = saliency_matrix_3d.shape[-1]
num_panel_rows = axes_object_matrix.shape[0]
num_panel_columns = axes_object_matrix.shape[1]
for k in range(num_predictors):
this_panel_row, this_panel_column = numpy.unravel_index(
k, (num_panel_rows, num_panel_columns), order=order_string
)
plot_2d_grid_with_pm_signs(
saliency_matrix_2d=saliency_matrix_3d[..., k],
axes_object=axes_object_matrix[this_panel_row, this_panel_column],
colour_map_object=colour_map_object,
max_absolute_colour_value=max_absolute_colour_value,
min_font_size=min_font_size, max_font_size=max_font_size)
|
mit
|
daureg/illalla
|
neighborhood.py
|
1
|
34865
|
#! /usr/bin/python2
# vim: set fileencoding=utf-8
"""Match polygonal regions between cities
Input:
name of two cities
a list of coordinates that make up a polygon in one city
Output:
a list of coordinates that make up a polygon in the other city
"""
from __future__ import print_function
import cities
import ClosestNeighbor as cn
# import explore as xp
import numpy as np
import utils as u
import itertools as i
import shapely.geometry as sgeo
import scipy.cluster.vq as vq
import emd_leftover
import logging
# pylint: disable=E1101
# pylint: disable=W0621
NB_CLUSTERS = 3
JUST_READING = False
MAX_EMD_POINTS = 750
NO_WEIGHT = True
QUERY_NAME = None
GROUND_TRUTH = None
import os
OTMPDIR = os.environ.get('OTMPDIR')
def profile(func):
return func
@profile
def load_surroundings(city):
"""Load projected coordinates and extra field of all venues, checkins and
photos within `city`, as well as returning city geographical bounds."""
import persistent as p
surroundings = [p.load_var('{}_svenues.my'.format(city)), None, None]
# surroundings = [p.load_var('{}_s{}s.my'.format(city, kind))
# for kind in ['venue', 'checkin', 'photo']]
venues_pos = np.vstack(surroundings[0].loc)
city_extent = list(np.min(venues_pos, 0)) + list(np.max(venues_pos, 0))
return surroundings, city_extent
@profile
def polygon_to_local(city, geojson):
"""Convert a `geojson` geometry to a local bounding box in `city`, and
return its center, radius and a predicate indicating membership."""
assert geojson['type'] == 'Polygon'
coords = np.fliplr(np.array(geojson['coordinates'][0]))
projected = sgeo.Polygon(cities.GEO_TO_2D[city](coords))
minx, miny, maxx, maxy = projected.bounds
center = list(projected.centroid.coords[0])
radius = max(maxx - minx, maxy - miny)*0.5
return center, radius, projected.bounds, projected.contains
@profile
def describe_region(center, radius, belongs_to, surroundings, city_fv,
threshold=10):
"""Return the description (X, x, w, ids) of the region defined by
`center`, `radius` and `belongs_to`, provided that it contains enough
venues."""
svenues, scheckins, sphotos = surroundings
vids, _ = gather_entities(svenues, center, radius, belongs_to,
threshold)
if not vids:
return None, None, None, None
vids = filter(city_fv['index'].__contains__, vids)
if len(vids) < threshold:
return None, None, None, None
# _, ctime = gather_entities(scheckins, center, radius, belongs_to)
# _, ptime = gather_entities(sphotos, center, radius, belongs_to)
mask = np.where(np.in1d(city_fv['index'], vids))[0]
assert mask.size == len(vids)
weights = weighting_venues(mask if NO_WEIGHT else city_fv['users'][mask])
# time_activity = lambda visits: xp.aggregate_visits(visits, 1, 4)[0]
# activities = np.hstack([xp.to_frequency(time_activity(ctime)),
# xp.to_frequency(time_activity(ptime))])
activities = np.ones((12, 1))
return city_fv['features'][mask, :], activities, weights, vids
@profile
def features_support(features):
"""Return a list of intervals representing the support of the probability
distribution for each dimension."""
return zip(np.min(features, 0), np.max(features, 0))
@u.memodict
def right_bins(dim):
extent = RIGHT_SUPPORT[dim][1] - RIGHT_SUPPORT[dim][0]
bins = 10
size = 1.0/bins
return [RIGHT_SUPPORT[dim][0] + j*size*extent for j in range(bins+1)]
@profile
def features_as_density(features, weights, support, bins=10):
"""Turn raw `features` into probability distribution over each dimension,
with respect to `weights`."""
def get_bins_full(dim):
extent = support[dim][1] - support[dim][0]
size = 1.0/bins
return [support[dim][0] + j*size*extent for j in range(bins+1)]
get_bins = right_bins if support is RIGHT_SUPPORT else get_bins_full
return np.vstack([np.histogram(features[:, i], weights=weights,
bins=get_bins(i))[0]
for i in range(features.shape[1])])
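# Minimal sketch of the densities computed above (illustrative values only,
# not used elsewhere in this module):
#
#     demo_features = np.array([[0.1, 5.0], [0.4, 7.0], [0.9, 6.0]])
#     demo_weights = np.array([0.2, 0.5, 0.3])
#     demo_support = features_support(demo_features)
#     demo_density = features_as_density(demo_features, demo_weights,
#                                        demo_support)
#
# `demo_density` has one row per feature dimension, each row being the
# weighted histogram of that dimension over its support (10 bins by default).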
def features_as_lists(features):
"""Turn numpy `features` into a list of list, suitable for emd
function."""
return features.tolist()
@profile
def weighting_venues(values):
"""Transform `values` into a list of positive weights that sum up to 1."""
if NO_WEIGHT:
return np.ones(values.size)/values.size
from sklearn.preprocessing import MinMaxScaler
scale = MinMaxScaler()
size = values.size
scaled = scale.fit_transform(np.power(values, .2).reshape((size, 1)))
normalized = scaled.ravel()/np.sum(scaled)
normalized[normalized < 1e-6] = 1e-6
return normalized
@profile
def gather_entities(surrounding, center, radius, belongs_to, threshold=0):
"""Filter points in `surrounding` that belong to the given region."""
ids, info, locs = surrounding.around(center, radius)
info = len(ids)*[0, ] if len(info) == 0 else list(info[0])
if len(ids) < threshold:
return None, None
if belongs_to is None:
return ids, info
is_inside = lambda t: belongs_to(sgeo.Point(t[2]))
res = zip(*(i.ifilter(is_inside, i.izip(ids, info, locs))))
if len(res) != 3:
return None, None
ids[:], info[:], locs[:] = res
if len(ids) < threshold:
return None, None
return ids, info
@profile
def jensen_shannon_divergence(P, Q):
"""Compute JSD(P || Q) as defined in
https://en.wikipedia.org/wiki/Jensen–Shannon_divergence """
avg = 0.5*(P + Q)
avg_entropy = 0.5*(u.compute_entropy(P) + u.compute_entropy(Q))
return u.compute_entropy(avg) - avg_entropy
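# Sanity checks for the divergence above (assuming `u.compute_entropy` is the
# usual Shannon entropy of a discrete distribution): JSD(P || P) == 0 for any
# P, and two distributions with disjoint support reach the maximum value,
# log(2) in whatever base the entropy uses.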
@profile
def proba_distance(density1, global1, density2, global2, theta):
"""Compute total distances between all distributions"""
proba = np.dot(theta, [jensen_shannon_divergence(p, q)
for p, q in zip(density1, density2)])
return proba[0] + np.linalg.norm(global1 - global2)
SURROUNDINGS, CITY_FEATURES, THRESHOLD = None, None, None
METRIC_NAME, CITY_SUPPORT, DISTANCE_FUNCTION, RADIUS = None, None, None, None
RIGHT_SUPPORT = None
@profile
def generic_distance(metric, distance, features, weights, support,
c_times=None, id_=None):
"""Compute the distance of (`features`, `weights`) using `distance`
function (corresponding to `metric`)."""
if c_times is None:
c_times = np.ones((12, 1))
if 'emd' in metric:
c_density = features_as_lists(features)
supp = weights
elif 'cluster' == metric:
c_density = features
supp = weights
elif 'leftover' in metric:
c_density = features
supp = (weights, id_)
elif 'jsd' in metric:
c_density = features_as_density(features, weights, support)
supp = c_times
else:
raise ValueError('unknown metric {}'.format(metric))
return distance(c_density, supp)
@profile
def one_cell(args):
cx, cy, id_x, id_y, id_ = args
center = [cx, cy]
contains = None
candidate = describe_region(center, RADIUS, contains,
SURROUNDINGS, CITY_FEATURES,
THRESHOLD)
features, c_times, weights, c_vids = candidate
if features is not None:
distance = generic_distance(METRIC_NAME, DISTANCE_FUNCTION, features,
weights, CITY_SUPPORT, c_times=c_times,
id_=id_)
return [cx, cy, distance, c_vids]
else:
return [None, None, None, None]
@profile
def brute_search(city_desc, hsize, distance_function, threshold,
metric='jsd'):
"""Move a sliding circle over the whole city and keep track of the best
result."""
global SURROUNDINGS, CITY_FEATURES, THRESHOLD, RADIUS
global METRIC_NAME, CITY_SUPPORT, DISTANCE_FUNCTION
import multiprocessing
RADIUS = hsize
THRESHOLD = threshold
METRIC_NAME = metric
city_size, CITY_SUPPORT, CITY_FEATURES, city_infos = city_desc
SURROUNDINGS, bounds = city_infos
DISTANCE_FUNCTION = distance_function
minx, miny, maxx, maxy = bounds
nb_x_step = int(3*np.floor(city_size[0]) / hsize + 1)
nb_y_step = int(3*np.floor(city_size[1]) / hsize + 1)
best = [1e20, [], [], RADIUS]
res_map = []
pool = multiprocessing.Pool(4)
x_steps = np.linspace(minx+hsize, maxx-hsize, nb_x_step)
y_steps = np.linspace(miny+hsize, maxy-hsize, nb_y_step)
x_vals, y_vals = np.meshgrid(x_steps, y_steps)
to_cell_arg = lambda _: (float(_[1][0]), float(_[1][1]), _[0] % nb_x_step,
_[0]/nb_x_step, _[0])
cells = i.imap(to_cell_arg, enumerate(i.izip(np.nditer(x_vals),
np.nditer(y_vals))))
res = pool.map(one_cell, cells)
pool.close()
pool.join()
res_map = []
if metric == 'leftover':
dsts = emd_leftover.collect_matlab_output(len(res))
for cell, dst in i.izip(res, dsts):
if cell[0]:
cell[2] = dst
clean_tmp_mats()
for cell in res:
if cell[0] is None:
continue
res_map.append(cell[:3])
if cell[2] < best[0]:
best = [cell[2], cell[3], [cell[0], cell[1]], RADIUS]
if QUERY_NAME:
import persistent as p
logging.info('wrote: '+str(os.path.join(OTMPDIR, QUERY_NAME)))
p.save_var(os.path.join(OTMPDIR, QUERY_NAME),
[[cell[2], cell[3], [cell[0], cell[1]], RADIUS]
for cell in res if cell[0]])
yield best, res_map, 1.0
def interpret_query(from_city, to_city, region, metric):
"""Load informations about cities and compute useful quantities."""
# Load info of the first city
suffix = '_tsne.mat' if metric == 'emd-tsne' else ''
left = cn.gather_info(from_city+suffix, knn=1,
raw_features='lmnn' not in metric,
hide_category=metric != 'jsd')
left_infos = load_surroundings(from_city)
left_support = features_support(left['features'])
# Compute info about the query region
center, radius, _, contains = polygon_to_local(from_city, region)
query = describe_region(center, radius, contains, left_infos[0], left)
features, times, weights, vids = query
# print('{} venues in query region.'.format(len(vids)))
venue_proportion = 1.0*len(vids) / left['features'].shape[0]
# And use them to define the metric that will be used
theta = np.ones((1, left['features'].shape[1]))
theta = np.array([[0.0396, 0.0396, 0.2932, 0.0396, 0.0396, 0.0396,
0.0396, 0.3404, 0.0396, 0.0396, 0.0396, 0.0396,
0.0396, 0.3564, 0.0396, 0.3564, 0.0396, 0.3564,
0.3564, 0.3564, 0.0396, 0.0396, 0.0396, 0.0396,
0.3564, 0.0396, 0.0396, 0.0396, 0.0396, 0.0396,
0.0396]])
ltheta = len(theta.ravel())*[1, ]
if 'emd' in metric:
from emd import emd
from emd_dst import dist_for_emd
if 'tsne' in metric:
from specific_emd_dst import dst_tsne as dist_for_emd
if 'itml' in metric:
from specific_emd_dst import dst_itml as dist_for_emd
query_num = features_as_lists(features)
@profile
def regions_distance(r_features, r_weigths):
if len(r_features) >= MAX_EMD_POINTS:
return 1e20
return emd((query_num, map(float, weights)),
(r_features, map(float, r_weigths)),
lambda a, b: float(dist_for_emd(a, b, ltheta)))
elif 'cluster' in metric:
from scipy.spatial.distance import cdist
query_num = weighted_clusters(features, NB_CLUSTERS, weights)
def regions_distance(r_features, r_weigths):
r_cluster = weighted_clusters(r_features, NB_CLUSTERS, r_weigths)
costs = cdist(query_num, r_cluster).tolist()
return min_cost(costs)
elif 'leftover' in metric:
@profile
def regions_distance(r_features, second_arg):
r_weigths, idx = second_arg
emd_leftover.write_matlab_problem(features, weights, r_features,
r_weigths, idx)
return -1
else:
query_num = features_as_density(features, weights, left_support)
@profile
def regions_distance(r_density, r_global):
"""Return distance of a region from `query_num`."""
return proba_distance(query_num, times, r_density, r_global,
theta)
# Load info of the target city
right = cn.gather_info(to_city+suffix, knn=2,
raw_features='lmnn' not in metric,
hide_category=metric != 'jsd')
right_infos = load_surroundings(to_city)
minx, miny, maxx, maxy = right_infos[1]
right_city_size = (maxx - minx, maxy - miny)
right_support = features_support(right['features'])
global RIGHT_SUPPORT
RIGHT_SUPPORT = right_support
# given extents, compute threshold of candidate
threshold = 0.7 * venue_proportion * right['features'].shape[0]
right_desc = [right_city_size, right_support, right, right_infos]
return [left, right, right_desc, regions_distance, vids, threshold]
def best_match(from_city, to_city, region, tradius, progressive=False,
metric='jsd'):
"""Try to match a `region` from `from_city` to `to_city`. If progressive,
yield intermediate result."""
assert metric in ['jsd', 'emd', 'jsd-nospace', 'jsd-greedy', 'cluster',
'leftover', 'emd-lmnn', 'emd-itml', 'emd-tsne']
infos = interpret_query(from_city, to_city, region, metric)
left, right, right_desc, regions_distance, vids, threshold = infos
threshold /= 4.0
if JUST_READING:
yield vids, None, None
raise Exception()
res, vals = None, None
if metric.endswith('-nospace'):
res, vals = search_no_space(vids, 10.0/7*threshold, regions_distance,
left, right, RIGHT_SUPPORT)
elif metric.endswith('-greedy'):
res, vals = greedy_search(10.0/7*threshold, regions_distance, right,
RIGHT_SUPPORT)
else:
# Use case for https://docs.python.org/3/whatsnew/3.3.html#pep-380
for res, vals, progress in brute_search(right_desc, tradius,
regions_distance, threshold,
metric=metric):
if progressive:
yield res, vals, progress
else:
print(progress, end='\t')
yield res, vals, 1.0
@profile
def weighted_clusters(venues, k, weights):
"""Return `k` centroids from `venues` (clustering is unweighted by
centroid computation honors `weights` of each venues)."""
labels = np.zeros(venues.shape[0])
if k > 1:
nb_tries = 0
while len(np.unique(labels)) != k and nb_tries < 5:
_, labels = vq.kmeans2(venues, k, iter=5, minit='points')
nb_tries += 1
try:
return np.array([np.average(venues[labels == i, :], 0,
weights[labels == i])
for i in range(k)])
except ZeroDivisionError:
print(labels)
print(weights)
print(np.sum(weights))
raise
@profile
def min_cost(costs):
"""Return average min-cost of assignment of row and column of the `costs`
matrix."""
import munkres
assignment = munkres.Munkres().compute(costs)
cost = sum([costs[r][c] for r, c in assignment])
return cost/len(costs)
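# Illustrative check (assumed numbers): for costs = [[4.0, 1.0], [2.0, 0.0]]
# the optimal assignment picks the entries 1.0 and 2.0, so min_cost returns
# (1.0 + 2.0) / 2 = 1.5.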
def one_method_seed_regions(from_city, to_city, region, metric,
candidate_generation, clustering):
"""Return promising clusters matching `region`."""
assert candidate_generation in ['knn', 'dst']
assert clustering in ['discrepancy', 'dbscan']
infos = interpret_query(from_city, to_city, region, metric)
left, right, right_desc, regions_distance, vids, threshold = infos
if candidate_generation == 'knn':
candidates = get_knn_candidates(vids, left, right, threshold,
at_most=15*threshold)
elif candidate_generation == 'dst':
candidates = get_neighborhood_candidates(regions_distance, right,
metric, at_most=15*threshold)
clusters = find_promising_seeds(candidates[1], right_desc[3][0][0],
clustering, right)
how_many = min(len(clusters), 6)
msg = 'size of cluster: '
msg += str([len(_[1]) for _ in clusters])
msg += '\ndistance, radius, nb_venues:\n'
print(msg)
for cluster in clusters[:how_many]:
mask = np.where(np.in1d(right['index'], cluster[1]+cluster[2]))[0]
weights = weighting_venues(right['users'][mask])
features = right['features'][mask, :]
dst = generic_distance(metric, regions_distance, features, weights,
support=right_desc[1])
msg += '{:.4f}, {:.1f}, {}\n'.format(dst, np.sqrt(cluster[0].area),
len(mask))
print(msg)
return [_[1] for _ in clusters[:how_many]], msg
def get_seed_regions(from_city, to_city, region):
for metric in ['jsd', 'emd']:
infos = interpret_query(from_city, to_city, region, metric)
left, right, right_desc, regions_distance, vids, threshold = infos
knn_cds = get_knn_candidates(vids, left, right, threshold, at_most=250)
ngh_cds = get_neighborhood_candidates(regions_distance, right, metric,
at_most=250)
for _, candidates in [knn_cds, ngh_cds]:
for scan in ['dbscan', 'discrepancy']:
clusters = find_promising_seeds(candidates,
right_desc[3][0][0], scan,
right)
for cl in clusters:
print(metric, scan, cl[1])
@profile
def greedy_search(nb_venues, distance_function, right_knn, support):
"""Find `nb_venues` in `right_knn` that optimize the total distance
according to `distance_function`."""
import random as r
candidates_idx = []
nb_venues = int(nb_venues)+3
while len(candidates_idx) < nb_venues:
best_dst, best_idx = 1e15, 0
for ridx in range(len(right_knn['index'])):
if ridx in candidates_idx or r.random() > 0.3:
continue
mask = np.array([ridx] + candidates_idx)
weights = weighting_venues(right_knn['users'][mask])
activities = np.ones((12, 1))
features = right_knn['features'][mask, :]
density = features_as_density(features, weights, support)
distance = distance_function(density, activities)
if distance < best_dst:
best_dst, best_idx = distance, ridx
candidates_idx.append(best_idx)
print('add: {}. dst = {:.4f}'.format(right_knn['index'][best_idx],
best_dst))
r_vids = [right_knn['index'][_] for _ in candidates_idx]
return [best_dst, r_vids, [], -1], None
def get_knn_candidates(vids, left_knn, right_knn, at_least, at_most=None):
"""Return between `at_least` and `at_most` venue in right that are close (in
the sense of euclidean distance) of the `vids` in left. Namely, it return
their row number and their ids."""
import heapq
candidates = []
candidates_id = []
knn = right_knn['knn']
at_most = int(at_most) or 50000
nb_venues = min(at_most, max(len(vids)*knn, at_least))
for idx, vid in enumerate(vids):
_, rid, ridx, dst, _ = cn.find_closest(vid, left_knn, right_knn)
for dst_, rid_, ridx_, idx_ in zip(dst, rid, ridx, range(knn)):
if rid_ not in candidates_id:
candidates_id.append(rid_)
heapq.heappush(candidates, (dst_, idx*knn+idx_,
(rid_, ridx_)))
nb_venues = min(len(candidates), int(nb_venues))
closest = heapq.nsmallest(nb_venues, candidates)
mask = np.array([v[2][1] for v in closest])
r_vids = np.array([v[2][0] for v in closest])
return mask, r_vids
def get_neighborhood_candidates(distance_function, right_knn, metric,
at_most=None):
candidates = []
activities = np.ones((12, 1))
weights = [1.0]
nb_dims = right_knn['features'].shape[1]
for idx, vid in enumerate(right_knn['index']):
features = right_knn['features'][idx, :].reshape(1, nb_dims)
if 'jsd' in metric:
density = features_as_density(features, weights, RIGHT_SUPPORT)
dst = distance_function(density, activities)
elif 'emd' in metric:
dst = distance_function([list(features.ravel())], weights)
else:
raise ValueError('unknown metric {}'.format(metric))
candidates.append((dst, idx, vid))
nb_venues = min(int(at_most), len(candidates))
closest = sorted(candidates, key=lambda x: x[0])[:nb_venues]
mask = np.array([v[1] for v in closest])
r_vids = np.array([v[2] for v in closest])
return mask, r_vids
def search_no_space(vids, nb_venues, distance_function, left_knn, right_knn,
support):
"""Find `nb_venues` in `right_knn` that are close to those in `vids` (in
the sense of euclidean distance) and return the distance with this
“virtual” neighborhood (for comparaison purpose)"""
mask, r_vids = get_knn_candidates(vids, left_knn, right_knn, nb_venues)
weights = weighting_venues(right_knn['users'][mask])
activities = np.ones((12, 1))
features = right_knn['features'][mask, :]
density = features_as_density(features, weights, support)
distance = distance_function(density, activities)
return [distance, r_vids, [], -1], None
def interpolate_distances(values_map, filename):
"""Plot the distance at every circle center and interpolate between"""
from scipy.interpolate import griddata
from matplotlib import pyplot as plt
import persistent as p
filename = os.path.join('distance_map', filename)
x, y, z = [np.array(dim) for dim in zip(*[a for a in values_map])]
x_ext = [x.min(), x.max()]
y_ext = [y.min(), y.max()]
xi = np.linspace(x_ext[0], x_ext[1], 100)
yi = np.linspace(y_ext[0], y_ext[1], 100)
zi = griddata((x, y), z, (xi[None, :], yi[:, None]), method='cubic')
fig = plt.figure(figsize=(22, 18))
plt.contour(xi, yi, zi, 20, linewidths=0.8, colors='#282828')
plt.contourf(xi, yi, zi, 20, cmap=plt.cm.Greens)
plt.colorbar()
plt.scatter(x, y, marker='o', c='#282828', s=5)
plt.tight_layout(pad=0)
plt.xlim(*x_ext)
plt.ylim(*y_ext)
plt.savefig(filename, dpi=96, transparent=False, frameon=False,
bbox_inches='tight', pad_inches=0.01)
p.save_var(filename.replace('.png', '.my'), values_map)
plt.close(fig)
def choose_query_region(ground_truths):
"""Pick among all `ground_truths` regions one that have at least 20
venues, and is closest to 150."""
if not ground_truths:
return None
area_size = [(area, len(area['properties']['venues']))
for area in ground_truths
if len(area['properties']['venues']) >= 20]
if not area_size:
return None
return sorted(area_size, key=lambda x: abs(150 - x[1]))[0][0]['geometry']
def batch_matching(query_city='paris'):
"""Match preselected regions of `query_city` into the other target
cities"""
import ujson
global QUERY_NAME
global OTMPDIR
with open('static/ground_truth.json') as gt:
regions = ujson.load(gt)
districts = sorted(regions.keys())
cities = sorted(regions.values()[0]['gold'].keys())
assert query_city in cities
cities.remove(query_city)
OTMPDIR = os.path.join(OTMPDIR, 'www_comparaison_'+query_city)
try:
os.mkdir(OTMPDIR)
except OSError:
pass
# cities = ['berlin']
# districts = ['montmartre', 'triangle']
for city in cities:
print(city)
for neighborhood in districts:
# for _ in [1]:
# for city, neighborhood in [('washington', 'marais'), ('washington', 'montmartre')]:
print(neighborhood)
possible_regions = regions[neighborhood]['gold'].get(query_city)
rgeo = choose_query_region(possible_regions)
if not rgeo:
continue
for metric in ['emd-itml', 'emd-tsne']:
# for metric in ['jsd', 'emd', 'cluster', 'emd-lmnn', 'leftover']:
print(metric)
for radius in np.linspace(200, 500, 5):
print(radius)
QUERY_NAME = '{}_{}_{}_{}.my'.format(city, neighborhood,
int(radius),
metric)
logging.info('will write: '+str(os.path.join(OTMPDIR, QUERY_NAME)))
if os.path.isfile(os.path.join(OTMPDIR, QUERY_NAME)):
continue
res, values, _ = best_match(query_city, city, rgeo, radius,
metric=metric).next()
continue
distance, r_vids, center, radius = res
print(distance)
if center is None:
result = {'dst': distance, 'metric': metric,
'nb_venues': 0}
else:
center = cities.euclidean_to_geo(city, center)
result = {'geo': {'type': 'circle',
'center': center, 'radius': radius},
'dst': distance, 'metric': metric,
'nb_venues': len(r_vids)}
regions[neighborhood][city].append(result)
# outname = '{}_{}_{}_{}.png'.format(city, neighborhood,
# int(radius), metric)
# interpolate_distances(values, outname)
with open('static/cpresets.js', 'w') as out:
out.write('var PRESETS =' + ujson.dumps(regions) + ';')
def find_promising_seeds(good_ids, venues_infos, method, right):
"""Try to find high concentration of `good_ids` venues among all
`venues_infos` using one of the following methods:
['dbscan'|'discrepancy'].
Return a list of convex hulls with associated list of good and bad
venues id"""
vids, _, venues_loc = venues_infos.all()
significant_id = {vid: loc for vid, loc in i.izip(vids, venues_loc)
if vid in right['index']}
good_loc = np.array([significant_id[v] for v in good_ids])
bad_ids = [v for v in significant_id.iterkeys() if v not in good_ids]
bad_loc = np.array([significant_id[v] for v in bad_ids])
if method == 'discrepancy':
hulls, gcluster, bcluster = discrepancy_seeds((good_ids, good_loc),
(bad_ids, bad_loc),
np.array(venues_loc))
elif method == 'dbscan':
hulls, gcluster, bcluster = dbscan_seeds((good_ids, good_loc),
(bad_ids, bad_loc))
else:
raise ValueError('{} is not supported'.format(method))
clusters = zip(hulls, gcluster, bcluster)
return sorted(clusters, key=lambda x: len(x[1]), reverse=True)
def discrepancy_seeds(goods, bads, all_locs):
"""Find regions with concentration of good points compared with bad
ones."""
import spatial_scan as sps
size = 50
support = 8
sps.GRID_SIZE = size
sps.TOP_K = 500
xedges, yedges = [np.linspace(low, high, size+1)
for low, high in zip(np.min(all_locs, 0),
np.max(all_locs, 0))]
bins = (xedges, yedges)
good_ids, good_loc = goods
bad_ids, bad_loc = bads
count, _, _ = np.histogram2d(good_loc[:, 0], good_loc[:, 1], bins=bins)
measured = count.T.ravel()
count, _, _ = np.histogram2d(bad_loc[:, 0], bad_loc[:, 1], bins=bins)
background = count.T.ravel()
total_b = np.sum(background)
total_m = np.sum(measured)
discrepancy = sps.get_discrepancy_function(total_m, total_b, support)
def euc_index_to_rect(idx):
"""Return the bounding box of a grid's cell defined by its
`idx`"""
i = idx % size
j = idx / size
return [xedges[i], yedges[j], xedges[i+1], yedges[j+1]]
sps.index_to_rect = euc_index_to_rect
top_loc = sps.exact_grid(np.reshape(measured, (size, size)),
np.reshape(background, (size, size)),
discrepancy, sps.TOP_K,
sps.GRID_SIZE/8)
merged = sps.merge_regions(top_loc)
gcluster = []
bcluster = []
hulls = []
for region in merged:
gcluster.append([id_ for id_, loc in zip(good_ids, good_loc)
if region[1].contains(sgeo.Point(loc))])
bcluster.append([id_ for id_, loc in zip(bad_ids, bad_loc)
if region[1].contains(sgeo.Point(loc))])
hulls.append(region[1].convex_hull)
return hulls, gcluster, bcluster
def dbscan_seeds(goods, bads):
"""Find regions with concentration of good points."""
from scipy.spatial import ConvexHull
import sklearn.cluster as cl
good_ids, good_loc = goods
bad_ids, bad_loc = bads
labels = cl.DBSCAN(eps=150, min_samples=8).fit_predict(good_loc)
gcluster = []
bcluster = []
hulls = []
for cluster in range(len(np.unique(labels))-1):
points = good_loc[labels == cluster, :]
hull = sgeo.Polygon(points[ConvexHull(points).vertices])
gcluster.append(list(i.compress(good_ids, labels == cluster)))
bcluster.append([id_ for id_, loc in zip(bad_ids, bad_loc)
if hull.contains(sgeo.Point(loc))])
hulls.append(hull)
return hulls, gcluster, bcluster
def get_gold_desc(city, district):
"""Return a feature description of each gold region of
(`city`, `district`)."""
try:
golds = [_['properties']['venues']
for _ in GROUND_TRUTH[district]['gold'][city['city']]]
except KeyError as oops:
print(oops)
return None
res = []
for vids in golds:
mask = np.where(np.in1d(city['index'], vids))[0]
assert mask.size == len(vids)
weights = weighting_venues(city['users'][mask])
activities = np.ones((12, 1))
res.append((city['features'][mask, :], activities, weights, vids))
return res
def all_gold_dst():
"""Compute the distance between all gold regions and the query ones for
all metrics."""
assert GROUND_TRUTH, 'load GROUND_TRUTH before calling'
districts = GROUND_TRUTH.keys()
cities = GROUND_TRUTH.items()[0][1]['gold'].keys()
cities.remove('paris')
metrics = ['cluster', 'emd', 'emd-lmnn', 'jsd']
results = {}
for city, district in i.product(cities, districts):
geo = GROUND_TRUTH[district]['gold']['paris'][0]['geometry']
for metric in metrics:
name = '_'.join([city, district, metric])
info = interpret_query('paris', city, geo, metric)
_, target_city, target_desc, regions_distance, _, threshold = info
support = target_desc[1]
candidates = get_gold_desc(target_city, district)
if not candidates:
print(name + ' is empty')
continue
current_dsts = []
for region in candidates:
features, _, weights, _ = region
if metric == 'cluster' and weights.size < 3:
print("{}: can't make three clusters".format(name))
continue
dst = generic_distance(metric, regions_distance, features,
weights, support)
if metric == 'leftover':
dst = emd_leftover.collect_matlab_output(1)
clean_tmp_mats()
current_dsts.append(dst)
results[name] = current_dsts
return results
def clean_tmp_mats():
"""Remove .mat file after leftover metric has finished its computation."""
from subprocess import check_call, CalledProcessError
try:
check_call('rm /tmp/mats/*.mat', shell=True)
except CalledProcessError:
pass
if __name__ == '__main__':
# pylint: disable=C0103
# import json
# with open('static/ground_truth.json') as gt:
# GROUND_TRUTH = json.load(gt)
# import persistent as p
# distances = all_gold_dst()
# p.save_var('all_gold.my', distances)
import sys
batch_matching(sys.argv[1])
sys.exit()
import arguments
args = arguments.two_cities().parse_args()
origin, dest = args.origin, args.dest
user_input = {"type": "Polygon",
"coordinates": [[[2.3006272315979004, 48.86419005209702],
[2.311570644378662, 48.86941264251879],
[2.2995758056640625, 48.872983451383305],
[2.3006272315979004, 48.86419005209702]]]}
get_seed_regions(origin, dest, user_input)
sys.exit()
res, values, _ = best_match(origin, dest, user_input, 400,
metric='leftover').next()
distance, r_vids, center, radius = res
print(distance)
sys.exit()
for _ in sorted(r_vids):
print("'{}',".format(str(_)))
# print(distance, cities.euclidean_to_geo(dest, center))
# interpolate_distances(values, origin+dest+'.png')
# KDE preprocessing
# given all tweets, bin them according to time.
# Then run KDE on each bin, and compute a normalized grid in both cities
# (it's not cheap, but it's amortized over all queries)
# Then, when given a query, compute its average value for each time
# set a narrow range around each value and take the intersection of all
# point within this range in the other city.
# Increase range until we get big enough surface
# (or at least starting point)
|
mit
|
datapythonista/pandas
|
pandas/tests/indexing/multiindex/test_indexing_slow.py
|
2
|
2800
|
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
m = 50
n = 1000
cols = ["jim", "joe", "jolie", "joline", "jolia"]
vals = [
np.random.randint(0, 10, n),
np.random.choice(list("abcdefghij"), n),
np.random.choice(pd.date_range("20141009", periods=10).tolist(), n),
np.random.choice(list("ZYXWVUTSRQ"), n),
np.random.randn(n),
]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [
np.random.randint(0, 11, m),
np.random.choice(list("abcdefghijk"), m),
np.random.choice(pd.date_range("20141009", periods=11).tolist(), m),
np.random.choice(list("ZYXWVUTSRQP"), m),
]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[:: n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a = pd.concat([df, df])
b = df.drop_duplicates(subset=cols[:-1])
def validate(mi, df, key):
# check indexing into a multi-index before & past the lexsort depth
mask = np.ones(len(df)).astype("bool")
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[: i + 1] not in mi.index
continue
assert key[: i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
return_value = right.drop(cols[: i + 1], axis=1, inplace=True)
assert return_value is None
return_value = right.set_index(cols[i + 1 : -1], inplace=True)
assert return_value is None
tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
else: # full key
return_value = right.set_index(cols[:-1], inplace=True)
assert return_value is None
if len(right) == 1: # single hit
right = Series(
right["jolia"].values, name=right.index[0], index=["jolia"]
)
tm.assert_series_equal(mi.loc[key[: i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
@pytest.mark.parametrize("lexsort_depth", list(range(5)))
@pytest.mark.parametrize("key", keys)
@pytest.mark.parametrize("frame", [a, b])
def test_multiindex_get_loc(lexsort_depth, key, frame):
# GH7724, GH2646
with warnings.catch_warnings(record=True):
if lexsort_depth == 0:
df = frame.copy()
else:
df = frame.sort_values(by=cols[:lexsort_depth])
mi = df.set_index(cols[:-1])
assert not mi.index._lexsort_depth < lexsort_depth
validate(mi, df, key)
|
bsd-3-clause
|
hippke/TTV-TDV-exomoons
|
create_figures/system_16.py
|
1
|
7441
|
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
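# Quick sanity check for Body.attraction (illustrative numbers): two 1 kg
# bodies 1 m apart attract each other with f = G * 1 * 1 / 1**2, i.e. about
# 6.674e-11 N, split into fx, fy along the line joining the two bodies.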
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
    # Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print transit_duration
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = 0.48945554 * 10**9
thirdmoon = Body()
thirdmoon.mass = M_gan
thirdmoon.px = 0.59293316 * 10**9
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *thirdmoon.px ** 3) / (G * (thirdmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 3.005
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon, thirdmoon])
# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]'
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.15, +0.15)
plt.ylim(-0.65, +0.65)
plt.annotate(r"5:4:3", xy=(-0.145, +0.55), size=16)
plt.savefig("fig_system_16.eps", bbox_inches = 'tight')
|
mit
|
Lind-Project/native_client
|
tests/debug_stub/debug_stub_test.py
|
2
|
26691
|
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import struct
import subprocess
import sys
import unittest
import xml.etree.ElementTree
import gdb_rsp
NACL_SIGTRAP = 5
NACL_SIGSEGV = 11
# These are set up by Main().
ARCH = None
NM_TOOL = None
SEL_LDR_COMMAND = None
def AssertEquals(x, y):
if x != y:
raise AssertionError('%r != %r' % (x, y))
def DecodeHex(data):
assert len(data) % 2 == 0, data
return ''.join([chr(int(data[index * 2 : (index + 1) * 2], 16))
for index in xrange(len(data) / 2)])
def EncodeHex(data):
return ''.join('%02x' % ord(byte) for byte in data)
X86_32_REG_DEFS = [
('eax', 'I'),
('ecx', 'I'),
('edx', 'I'),
('ebx', 'I'),
('esp', 'I'),
('ebp', 'I'),
('esi', 'I'),
('edi', 'I'),
('eip', 'I'),
('eflags', 'I'),
('cs', 'I'),
('ss', 'I'),
('ds', 'I'),
('es', 'I'),
('fs', 'I'),
('gs', 'I'),
]
X86_64_REG_DEFS = [
('rax', 'Q'),
('rbx', 'Q'),
('rcx', 'Q'),
('rdx', 'Q'),
('rsi', 'Q'),
('rdi', 'Q'),
('rbp', 'Q'),
('rsp', 'Q'),
('r8', 'Q'),
('r9', 'Q'),
('r10', 'Q'),
('r11', 'Q'),
('r12', 'Q'),
('r13', 'Q'),
('r14', 'Q'),
('r15', 'Q'),
('rip', 'Q'),
('eflags', 'I'),
('cs', 'I'),
('ss', 'I'),
('ds', 'I'),
('es', 'I'),
('fs', 'I'),
('gs', 'I'),
]
ARM_REG_DEFS = ([('r%d' % regno, 'I') for regno in xrange(16)]
+ [('cpsr', 'I')])
REG_DEFS = {
'x86-32': X86_32_REG_DEFS,
'x86-64': X86_64_REG_DEFS,
'arm': ARM_REG_DEFS,
}
SP_REG = {
'x86-32': 'esp',
'x86-64': 'rsp',
'arm': 'r13',
}
IP_REG = {
'x86-32': 'eip',
'x86-64': 'rip',
'arm': 'r15',
}
X86_TRAP_FLAG = 1 << 8
# RESET_X86_FLAGS_VALUE is what ASM_WITH_REGS() resets the x86 flags
# to. Copied from tests/common/register_set.h.
RESET_X86_FLAGS_VALUE = (1 << 2) | (1 << 6)
KNOWN_X86_FLAGS_MASK = (1<<0) | (1<<2) | (1<<6) | (1<<7) | (1<<11) | (1<<8)
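# (Bit positions: CF=1<<0, PF=1<<2, ZF=1<<6, SF=1<<7, TF=1<<8, OF=1<<11.)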
# These are the only ARM CPSR bits that user code and untrusted code
# can read and modify, excluding the IT bits which are for Thumb-2
# (for If-Then-Else instructions). Copied from
# tests/common/register_set.h.
ARM_USER_CPSR_FLAGS_MASK = (
(1<<31) | # N
(1<<30) | # Z
(1<<29) | # C
(1<<28) | # V
(1<<27) | # Q
(1<<19) | (1<<18) | (1<<17) | (1<<16)) # GE bits
def DecodeRegs(reply):
defs = REG_DEFS[ARCH]
names = [reg_name for reg_name, reg_fmt in defs]
fmt = ''.join([reg_fmt for reg_name, reg_fmt in defs])
values = struct.unpack_from(fmt, DecodeHex(reply))
return dict(zip(names, values))
def EncodeRegs(regs):
defs = REG_DEFS[ARCH]
names = [reg_name for reg_name, reg_fmt in defs]
fmt = ''.join([reg_fmt for reg_name, reg_fmt in defs])
values = [regs[r] for r in names]
return EncodeHex(struct.pack(fmt, *values))
def PopenDebugStub(test):
gdb_rsp.EnsurePortIsAvailable()
return subprocess.Popen(SEL_LDR_COMMAND + ['-g', test])
def KillProcess(process):
try:
process.kill()
except OSError:
if sys.platform == 'win32':
# If process is already terminated, kill() throws
# "WindowsError: [Error 5] Access is denied" on Windows.
pass
else:
raise
process.wait()
class LaunchDebugStub(object):
def __init__(self, test):
self._proc = PopenDebugStub(test)
def __enter__(self):
try:
return gdb_rsp.GdbRspConnection()
except:
KillProcess(self._proc)
raise
def __exit__(self, exc_type, exc_value, traceback):
KillProcess(self._proc)
def GetSymbols():
assert '-f' in SEL_LDR_COMMAND
nexe_filename = SEL_LDR_COMMAND[SEL_LDR_COMMAND.index('-f') + 1]
symbols = {}
proc = subprocess.Popen([NM_TOOL, '--format=posix', nexe_filename],
stdout=subprocess.PIPE)
for line in proc.stdout:
match = re.match('(\S+) [TtWwBD] ([0-9a-fA-F]+)', line)
if match is not None:
name = match.group(1)
addr = int(match.group(2), 16)
symbols[name] = addr
result = proc.wait()
assert result == 0, result
return symbols
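# A GDB RSP stop reply of the form handled below looks like "T05thread:1a;":
# "T", a two-hex-digit signal number, then a "thread:<id>;" pair with the
# thread id in hex.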
def ParseThreadStopReply(reply):
match = re.match('T([0-9a-f]{2})thread:([0-9a-f]+);$', reply)
if match is None:
raise AssertionError('Bad thread stop reply: %r' % reply)
return {'signal': int(match.group(1), 16),
'thread_id': int(match.group(2), 16)}
def AssertReplySignal(reply, signal):
AssertEquals(ParseThreadStopReply(reply)['signal'], signal)
def ReadMemory(connection, address, size):
reply = connection.RspRequest('m%x,%x' % (address, size))
assert not reply.startswith('E'), reply
return DecodeHex(reply)
def ReadUint32(connection, address):
return struct.unpack('I', ReadMemory(connection, address, 4))[0]
class DebugStubTest(unittest.TestCase):
def test_initial_breakpoint(self):
# Any arguments to the nexe would work here because we are only
# testing that we get a breakpoint at the _start entry point.
with LaunchDebugStub('test_getting_registers') as connection:
reply = connection.RspRequest('?')
AssertReplySignal(reply, NACL_SIGTRAP)
def CheckTargetXml(self, connection):
reply = connection.RspRequest('qXfer:features:read:target.xml:0,fff')
self.assertEquals(reply[0], 'l')
# Just check that we are given parsable XML.
xml.etree.ElementTree.fromstring(reply[1:])
# Test that we can fetch register values.
# This check corresponds to the last instruction of debugger_test.c
def CheckReadRegisters(self, connection):
registers = DecodeRegs(connection.RspRequest('g'))
if ARCH == 'x86-32':
self.assertEquals(registers['eax'], 0x11000022)
self.assertEquals(registers['ebx'], 0x22000033)
self.assertEquals(registers['ecx'], 0x33000044)
self.assertEquals(registers['edx'], 0x44000055)
self.assertEquals(registers['esi'], 0x55000066)
self.assertEquals(registers['edi'], 0x66000077)
self.assertEquals(registers['ebp'], 0x77000088)
self.assertEquals(registers['esp'], 0x88000099)
self.assertEquals(registers['eflags'] & KNOWN_X86_FLAGS_MASK,
RESET_X86_FLAGS_VALUE)
elif ARCH == 'x86-64':
self.assertEquals(registers['rax'], 0x1100000000000022)
self.assertEquals(registers['rbx'], 0x2200000000000033)
self.assertEquals(registers['rcx'], 0x3300000000000044)
self.assertEquals(registers['rdx'], 0x4400000000000055)
self.assertEquals(registers['rsi'], 0x5500000000000066)
self.assertEquals(registers['rdi'], 0x6600000000000077)
self.assertEquals(registers['r8'], 0x7700000000000088)
self.assertEquals(registers['r9'], 0x8800000000000099)
self.assertEquals(registers['r10'], 0x99000000000000aa)
self.assertEquals(registers['r11'], 0xaa000000000000bb)
self.assertEquals(registers['r12'], 0xbb000000000000cc)
self.assertEquals(registers['r13'], 0xcc000000000000dd)
self.assertEquals(registers['r14'], 0xdd000000000000ee)
self.assertEquals(registers['rsp'], registers['r15'] + 0x12300321)
self.assertEquals(registers['rbp'], registers['r15'] + 0x23400432)
self.assertEquals(registers['eflags'] & KNOWN_X86_FLAGS_MASK,
RESET_X86_FLAGS_VALUE)
elif ARCH == 'arm':
self.assertEquals(registers['r0'], 0x00000001)
self.assertEquals(registers['r1'], 0x10000002)
self.assertEquals(registers['r2'], 0x20000003)
self.assertEquals(registers['r3'], 0x30000004)
self.assertEquals(registers['r4'], 0x40000005)
self.assertEquals(registers['r5'], 0x50000006)
self.assertEquals(registers['r6'], 0x60000007)
self.assertEquals(registers['r7'], 0x70000008)
self.assertEquals(registers['r8'], 0x80000009)
# Skip r9 because it is not supposed to be settable or readable
# by untrusted code.
self.assertEquals(registers['r10'], 0xa000000b)
self.assertEquals(registers['r11'], 0xb000000c)
self.assertEquals(registers['r12'], 0xc000000d)
self.assertEquals(registers['r13'], 0x12345678)
self.assertEquals(registers['r14'], 0xe000000f)
self.assertEquals(registers['cpsr'] & ARM_USER_CPSR_FLAGS_MASK,
(1 << 29) | (1 << 27))
else:
raise AssertionError('Unknown architecture')
expected_fault_addr = GetSymbols()['fault_addr']
if ARCH == 'x86-64':
expected_fault_addr += registers['r15']
self.assertEquals(registers[IP_REG[ARCH]], expected_fault_addr)
# Test that we can write registers.
def CheckWriteRegisters(self, connection):
if ARCH == 'x86-32':
reg_name = 'edx'
elif ARCH == 'x86-64':
reg_name = 'rdx'
elif ARCH == 'arm':
reg_name = 'r0'
else:
raise AssertionError('Unknown architecture')
# Read registers.
regs = DecodeRegs(connection.RspRequest('g'))
# Change a register.
regs[reg_name] += 1
new_value = regs[reg_name]
# Write registers.
self.assertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
# Read registers. Check for a new value.
regs = DecodeRegs(connection.RspRequest('g'))
self.assertEquals(regs[reg_name], new_value)
# TODO: Resume execution and check that changing the registers really
# influenced the program's execution. This would require changing
# debugger_test.c.
def CheckReadOnlyRegisters(self, connection):
if ARCH == 'x86-32':
sample_read_only_regs = ['cs', 'ds']
elif ARCH == 'x86-64':
sample_read_only_regs = ['r15', 'cs', 'ds']
elif ARCH == 'arm':
sample_read_only_regs = []
else:
raise AssertionError('Unknown architecture')
for reg_name in sample_read_only_regs:
# Read registers.
regs = DecodeRegs(connection.RspRequest('g'))
# Change a register.
old_value = regs[reg_name]
regs[reg_name] += 1
# Write registers.
self.assertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
# Read registers. Check for an old value.
regs = DecodeRegs(connection.RspRequest('g'))
self.assertEquals(regs[reg_name], old_value)
# Test that reading from an unreadable address gives a sensible error.
def CheckReadMemoryAtInvalidAddr(self, connection):
mem_addr = 0
result = connection.RspRequest('m%x,%x' % (mem_addr, 8))
self.assertEquals(result, 'E03')
# Run tests on debugger_test.c binary.
def test_debugger_test(self):
with LaunchDebugStub('test_getting_registers') as connection:
# Tell the process to continue, because it starts at the
# breakpoint set at its start address.
reply = connection.RspRequest('c')
if ARCH == 'arm':
# The process should have stopped on a BKPT instruction.
AssertReplySignal(reply, NACL_SIGTRAP)
else:
# The process should have stopped on a HLT instruction.
AssertReplySignal(reply, NACL_SIGSEGV)
self.CheckTargetXml(connection)
self.CheckReadRegisters(connection)
self.CheckWriteRegisters(connection)
self.CheckReadOnlyRegisters(connection)
def test_jump_to_address_zero(self):
with LaunchDebugStub('test_jump_to_address_zero') as connection:
# Continue from initial breakpoint.
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGSEGV)
registers = DecodeRegs(connection.RspRequest('g'))
if ARCH == 'x86-64':
self.assertEquals(registers[IP_REG[ARCH]], registers['r15'])
else:
self.assertEquals(registers[IP_REG[ARCH]], 0)
def test_reading_and_writing_memory(self):
# Any arguments to the nexe would work here because we do not run
# the executable beyond the initial breakpoint.
with LaunchDebugStub('test_getting_registers') as connection:
mem_addr = GetSymbols()['g_example_var']
# Check reading memory.
expected_data = 'some_debug_stub_test_data\0'
reply = connection.RspRequest('m%x,%x' % (mem_addr, len(expected_data)))
self.assertEquals(DecodeHex(reply), expected_data)
# On x86-64, for reading/writing memory, the debug stub accepts
# untrusted addresses with or without the %r15 sandbox base
# address added, because GDB uses both.
# TODO(eaeltsin): Fix GDB to not use addresses with %r15 added,
# and change the expected result in the check below.
if ARCH == 'x86-64':
registers = DecodeRegs(connection.RspRequest('g'))
sandbox_base_addr = registers['r15']
reply = connection.RspRequest('m%x,%x' % (sandbox_base_addr + mem_addr,
len(expected_data)))
self.assertEquals(DecodeHex(reply), expected_data)
# Check writing memory.
new_data = 'replacement_data\0'
assert len(new_data) < len(expected_data)
reply = connection.RspRequest('M%x,%x:%s' % (mem_addr, len(new_data),
EncodeHex(new_data)))
self.assertEquals(reply, 'OK')
# Check that we can read back what we wrote.
reply = connection.RspRequest('m%x,%x' % (mem_addr, len(new_data)))
self.assertEquals(DecodeHex(reply), new_data)
self.CheckReadMemoryAtInvalidAddr(connection)
def test_exit_code(self):
with LaunchDebugStub('test_exit_code') as connection:
reply = connection.RspRequest('c')
self.assertEquals(reply, 'W02')
# Single-step and check IP corresponds to debugger_test:test_single_step
def CheckSingleStep(self, connection, step_command, thread_id):
if ARCH == 'x86-32':
instruction_sizes = [1, 2, 3, 6]
elif ARCH == 'x86-64':
instruction_sizes = [1, 3, 4, 6]
else:
raise AssertionError('Unknown architecture')
ip = DecodeRegs(connection.RspRequest('g'))[IP_REG[ARCH]]
for size in instruction_sizes:
reply = connection.RspRequest(step_command)
AssertReplySignal(reply, NACL_SIGTRAP)
self.assertEquals(ParseThreadStopReply(reply)['thread_id'], thread_id)
ip += size
regs = DecodeRegs(connection.RspRequest('g'))
self.assertEqual(regs[IP_REG[ARCH]], ip)
# The trap flag should be reported as unset.
self.assertEqual(regs['eflags'] & X86_TRAP_FLAG, 0)
def test_single_step(self):
if ARCH == 'arm':
# Skip this test because single-stepping is not supported on ARM.
# TODO(eaeltsin):
# http://code.google.com/p/nativeclient/issues/detail?id=2911
return
with LaunchDebugStub('test_single_step') as connection:
# We expect test_single_step() to stop at a HLT instruction.
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGSEGV)
tid = ParseThreadStopReply(reply)['thread_id']
# Skip past the single-byte HLT instruction.
regs = DecodeRegs(connection.RspRequest('g'))
regs[IP_REG[ARCH]] += 1
AssertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
self.CheckSingleStep(connection, 's', tid)
# Check that we can continue after single-stepping.
reply = connection.RspRequest('c')
self.assertEquals(reply, 'W00')
def test_vCont(self):
# Basically repeat test_single_step, but using vCont commands.
if ARCH == 'arm':
# Skip this test because single-stepping is not supported on ARM.
# TODO(eaeltsin):
# http://code.google.com/p/nativeclient/issues/detail?id=2911
return
with LaunchDebugStub('test_single_step') as connection:
# Test if vCont is supported.
reply = connection.RspRequest('vCont?')
self.assertEqual(reply, 'vCont;s;S;c;C')
# Continue using vCont.
# We expect test_single_step() to stop at a HLT instruction.
reply = connection.RspRequest('vCont;c')
AssertReplySignal(reply, NACL_SIGSEGV)
# Get signalled thread id.
tid = ParseThreadStopReply(reply)['thread_id']
# Skip past the single-byte HLT instruction.
regs = DecodeRegs(connection.RspRequest('g'))
regs[IP_REG[ARCH]] += 1
AssertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
self.CheckSingleStep(connection, 'vCont;s:%x' % tid, tid)
# Single step one thread and continue all others.
reply = connection.RspRequest('vCont;s:%x;c' % tid)
# WARNING! This check is valid in single-threaded case only!
# In multi-threaded case another thread might stop first.
self.assertEqual(reply, 'T05thread:%x;' % tid)
# Try to continue the thread and to single-step all others.
reply = connection.RspRequest('vCont;c:%x;s' % tid)
self.assertTrue(reply.startswith('E'))
# Try to single-step wrong thread.
reply = connection.RspRequest('vCont;s:%x' % (tid + 2))
self.assertTrue(reply.startswith('E'))
# Try to single-step all threads.
reply = connection.RspRequest('vCont;s')
self.assertTrue(reply.startswith('E'))
def test_interrupt(self):
if ARCH == 'arm':
# Skip this test because single-stepping is not supported on ARM.
# TODO(eaeltsin):
# http://code.google.com/p/nativeclient/issues/detail?id=2911
return
func_addr = GetSymbols()['test_interrupt']
with LaunchDebugStub('test_interrupt') as connection:
      # Single-stepping inside syscalls doesn't work, so we need to reach
      # a point where the interrupt will not catch the program inside a syscall.
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGTRAP)
reply = connection.RspRequest('z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# Continue (program will spin forever), then interrupt.
connection.RspSendOnly('c')
reply = connection.RspInterrupt()
self.assertEqual(reply, 'T00')
# Single-step.
reply = connection.RspRequest('s')
AssertReplySignal(reply, NACL_SIGTRAP)
def test_modifying_code_is_disallowed(self):
with LaunchDebugStub('test_setting_breakpoint') as connection:
# Pick an arbitrary address in the code segment.
func_addr = GetSymbols()['breakpoint_target_func']
# Writing to the code area should be disallowed.
data = '\x00'
write_command = 'M%x,%x:%s' % (func_addr, len(data), EncodeHex(data))
reply = connection.RspRequest(write_command)
self.assertEquals(reply, 'E03')
class DebugStubBreakpointTest(unittest.TestCase):
def CheckInstructionPtr(self, connection, expected_ip):
ip_value = DecodeRegs(connection.RspRequest('g'))[IP_REG[ARCH]]
if ARCH == 'x86-64':
# TODO(mseaborn): The debug stub should probably omit the top
# bits of %rip automatically.
ip_value &= 0xffffffff
self.assertEquals(ip_value, expected_ip)
def test_setting_and_removing_breakpoint(self):
func_addr = GetSymbols()['breakpoint_target_func']
with LaunchDebugStub('test_setting_breakpoint') as connection:
# Set a breakpoint.
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# Requesting a breakpoint on an address that already has a
# breakpoint should return an error.
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'E03')
# When we run the program, we should hit the breakpoint. When
# we continue, we should hit the breakpoint again because it has
# not been removed: the debug stub does not step through
# breakpoints automatically.
for i in xrange(2):
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGTRAP)
self.CheckInstructionPtr(connection, func_addr)
# If we continue a single thread, the fault the thread receives
# should still be recognized as a breakpoint.
tid = ParseThreadStopReply(reply)['thread_id']
reply = connection.RspRequest('vCont;c:%x' % tid)
AssertReplySignal(reply, NACL_SIGTRAP)
self.CheckInstructionPtr(connection, func_addr)
# Check that we can remove the breakpoint.
reply = connection.RspRequest('z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# Requesting removing a breakpoint on an address that does not
# have one should return an error.
reply = connection.RspRequest('z0,%x,0' % func_addr)
self.assertEquals(reply, 'E03')
# After continuing, we should not hit the breakpoint again, and
# the program should run to completion.
reply = connection.RspRequest('c')
self.assertEquals(reply, 'W00')
def test_setting_breakpoint_on_invalid_address(self):
with LaunchDebugStub('test_exit_code') as connection:
# Requesting a breakpoint on an invalid address should give an error.
reply = connection.RspRequest('Z0,%x,1' % (1 << 32))
self.assertEquals(reply, 'E03')
def test_setting_breakpoint_on_data_address(self):
with LaunchDebugStub('test_exit_code') as connection:
# Pick an arbitrary address in the data segment.
data_addr = GetSymbols()['g_main_thread_var']
# Requesting a breakpoint on a non-code address should give an error.
reply = connection.RspRequest('Z0,%x,1' % data_addr)
self.assertEquals(reply, 'E03')
def test_breakpoint_memory_changes_are_hidden(self):
func_addr = GetSymbols()['breakpoint_target_func']
with LaunchDebugStub('test_setting_breakpoint') as connection:
chunk_size = 32
old_memory = ReadMemory(connection, func_addr, chunk_size)
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# The debug stub should hide the memory modification.
new_memory = ReadMemory(connection, func_addr, chunk_size)
self.assertEquals(new_memory, old_memory)
# Check reading a subset of the range. (This will only be a
# proper subset on architectures where the breakpoint size is
      # >1, such as ARM but not x86.)
new_memory = ReadMemory(connection, func_addr, 1)
self.assertEquals(new_memory, old_memory[:1])
class DebugStubThreadSuspensionTest(unittest.TestCase):
def SkipBreakpoint(self, connection, stop_reply):
# Skip past the faulting instruction in debugger_test.c's
# breakpoint() function.
regs = DecodeRegs(connection.RspRequest('g'))
if ARCH in ('x86-32', 'x86-64'):
AssertReplySignal(stop_reply, NACL_SIGSEGV)
# Skip past the single-byte HLT instruction.
regs[IP_REG[ARCH]] += 1
elif ARCH == 'arm':
AssertReplySignal(stop_reply, NACL_SIGTRAP)
bundle_size = 16
assert regs['r15'] % bundle_size == 0, regs['r15']
regs['r15'] += bundle_size
else:
raise AssertionError('Unknown architecture')
AssertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
def WaitForTestThreadsToStart(self, connection, symbols):
# Wait until:
# * The main thread starts to modify g_main_thread_var.
# * The child thread executes a breakpoint.
old_value = ReadUint32(connection, symbols['g_main_thread_var'])
while True:
reply = connection.RspRequest('c')
self.SkipBreakpoint(connection, reply)
child_thread_id = ParseThreadStopReply(reply)['thread_id']
if ReadUint32(connection, symbols['g_main_thread_var']) != old_value:
break
return child_thread_id
def test_continuing_thread_with_others_suspended(self):
with LaunchDebugStub('test_suspending_threads') as connection:
symbols = GetSymbols()
child_thread_id = self.WaitForTestThreadsToStart(connection, symbols)
# Test continuing a single thread while other threads remain
# suspended.
for _ in range(3):
main_thread_val = ReadUint32(connection, symbols['g_main_thread_var'])
child_thread_val = ReadUint32(connection, symbols['g_child_thread_var'])
reply = connection.RspRequest('vCont;c:%x' % child_thread_id)
self.SkipBreakpoint(connection, reply)
self.assertEquals(ParseThreadStopReply(reply)['thread_id'],
child_thread_id)
# The main thread should not be allowed to run, so should not
# modify g_main_thread_var.
self.assertEquals(
ReadUint32(connection, symbols['g_main_thread_var']),
main_thread_val)
# The child thread should always modify g_child_thread_var
# between each breakpoint.
self.assertNotEquals(
ReadUint32(connection, symbols['g_child_thread_var']),
child_thread_val)
def test_single_stepping_thread_with_others_suspended(self):
with LaunchDebugStub('test_suspending_threads') as connection:
symbols = GetSymbols()
child_thread_id = self.WaitForTestThreadsToStart(connection, symbols)
# Test single-stepping a single thread while other threads
# remain suspended.
for _ in range(3):
main_thread_val = ReadUint32(connection, symbols['g_main_thread_var'])
child_thread_val = ReadUint32(connection, symbols['g_child_thread_var'])
while True:
reply = connection.RspRequest('vCont;s:%x' % child_thread_id)
if (ARCH in ('x86-32', 'x86-64') and
ParseThreadStopReply(reply)['signal'] == NACL_SIGTRAP):
# We single-stepped through an instruction without
# otherwise faulting. We did not hit the breakpoint, so
# there is nothing to do.
pass
else:
self.SkipBreakpoint(connection, reply)
self.assertEquals(ParseThreadStopReply(reply)['thread_id'],
child_thread_id)
# The main thread should not be allowed to run, so should not
# modify g_main_thread_var.
self.assertEquals(
ReadUint32(connection, symbols['g_main_thread_var']),
main_thread_val)
# Eventually, the child thread should modify g_child_thread_var.
if (ReadUint32(connection, symbols['g_child_thread_var'])
!= child_thread_val):
break
def Main():
# TODO(mseaborn): Clean up to remove the global variables. They are
# currently here because unittest does not help with making
# parameterised tests.
index = sys.argv.index('--')
args = sys.argv[index + 1:]
# The remaining arguments go to unittest.main().
sys.argv = sys.argv[:index]
global ARCH
global NM_TOOL
global SEL_LDR_COMMAND
ARCH = args.pop(0)
NM_TOOL = args.pop(0)
SEL_LDR_COMMAND = args
unittest.main()
if __name__ == '__main__':
Main()
|
bsd-3-clause
|
RachitKansal/scikit-learn
|
examples/applications/topics_extraction_with_nmf_lda.py
|
133
|
3517
|
"""
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable within a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies; common English words, words occurring in
# only one document, and words occurring in at least 95% of the documents
# are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
|
bsd-3-clause
|
yonglehou/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares solution, the
coefficients exhibit large oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
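# (Broadcasting makes entry (i, j) equal to 1 / (i + j + 1) with 0-based indices.)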
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
wheeler-microfluidics/dmf-control-board-firmware
|
dmf_control_board_firmware/calibrate/impedance_benchmarks.py
|
3
|
9822
|
# coding: utf-8
import pandas as pd
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.colors import Colormap
from matplotlib.gridspec import GridSpec
import numpy as np
pd.set_option('display.width', 300)
def plot_capacitance_vs_frequency(df, **kwargs):
cleaned_df = df.dropna().copy()
fb_resistor_df = cleaned_df.set_index(cleaned_df.fb_resistor)
axis = kwargs.pop('axis', None)
s = kwargs.pop('s', 50)
facecolor = kwargs.pop('facecolor', 'none')
if axis is None:
fig = plt.figure()
axis = fig.add_subplot(111)
stats = fb_resistor_df[['frequency', 'C']].describe()
axis.set_xlim(0.8 * stats.frequency['min'], 1.2 * stats.frequency['max'])
axis.set_ylim(0.8 * stats.C['min'], 1.2 * stats.C['max'])
frequencies = fb_resistor_df.frequency.unique()
# Plot nominal test capacitance lines.
for C in fb_resistor_df.test_capacitor.unique():
axis.plot(frequencies, [C] * len(frequencies), '--', alpha=0.7,
color='0.5', linewidth=1)
# Plot scatter of _measured_ capacitance vs. frequency.
for k, v in fb_resistor_df[['frequency', 'C']].groupby(level=0):
try:
color = axis._get_lines.color_cycle.next()
except: # make compatible with matplotlib v1.5
color = axis._get_lines.prop_cycler.next()['color']
v.plot(kind='scatter', x='frequency', y='C', loglog=True,
label='R$_{fb,%d}$' % k, ax=axis, color=color,
s=s, facecolor=facecolor, **kwargs)
axis.legend(loc='upper right')
axis.set_xlabel('Frequency (Hz)')
axis.set_ylabel('C$_{device}$ (F)')
axis.set_title('C$_{device}$')
plt.tight_layout()
return axis
def estimate_relative_error_in_nominal_capacitance(df):
# Calculate the relative percentage difference in the mean capacitance
# values measured relative to the nominal values.
cleaned_df = df.dropna().copy()
C_relative_error = (cleaned_df.groupby('test_capacitor')
.apply(lambda x: ((x['C'] - x['test_capacitor']) /
x['test_capacitor']).describe()))
pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
print ('Estimated relative error in nominal capacitance values = %.1f%% '
' +/-%.1f%%' % (C_relative_error['mean'].mean() * 100,
C_relative_error['mean'].std() * 100))
print C_relative_error[['mean', 'std']] * 100
print
return C_relative_error
def plot_impedance_vs_frequency(data, figsize=(10, 8)):
test_loads = data['test_loads']
frequencies = data['frequencies']
C = data['C']
fb_resistor = data['fb_resistor']
calibration = data['calibration']
# create a masked array version of the capacitance matrix
C = np.ma.masked_invalid(C)
# create frequency matrix to match shape of C
f = np.tile(np.reshape(frequencies,
[len(frequencies)] + [1]*(len(C.shape) - 1)),
[1] + list(C.shape[1:]))
# Plot the impedance of each experiment vs frequency (with the data points
# color-coded according to the feedback resistor).
# Note that impedance, $Z$, can be computed as:
#
# 1
# Z = ──────────
# 2⋅π⋅freq⋅C
#
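    # For example (illustrative numbers only): C = 10 pF at f = 1 kHz gives
    # Z = 1 / (2 * pi * 1e3 * 10e-12) ~ 1.6e7 Ohm.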
plt.figure(figsize=figsize)
legend = []
for i in range(len(calibration.R_fb)):
legend.append("R$_{fb,%d}$" % i)
ind = mlab.find(fb_resistor == i)
plt.loglog(f.flatten()[ind], 1.0 / (2 * np.pi * f.flatten()[ind] *
C.flatten()[ind]), 'o')
plt.xlim(0.8 * np.min(frequencies), 1.2 * np.max(frequencies))
for C_device in test_loads:
# TODO: What is the reason for the `np.ones` below?
plt.plot(frequencies, 1.0 / (2 * np.pi * C_device *
np.ones(len(frequencies)) * frequencies),
'--', color='0.5')
plt.legend(legend)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Z$_{device}$ ($\Omega$)')
plt.title('Z$_{device}$')
plt.tight_layout()
def calculate_stats(df, groupby='test_capacitor'):
cleaned_df = df.dropna().copy()
stats = cleaned_df.groupby(groupby)['C'].agg(['mean', 'std', 'median'])
stats['bias %'] = (cleaned_df.groupby(groupby)
.apply(lambda x: ((x['C'] - x['test_capacitor'])).mean()
/ x['C'].mean())) * 100
stats['RMSE %'] = 100 * (cleaned_df.groupby(groupby)
.apply(lambda x: np.sqrt(((x['C'] -
x['test_capacitor']) **
2).mean()) /
x['C'].mean()))
stats['cv %'] = stats['std'] / stats['mean'] * 100
return stats
def print_detailed_stats_by_condition(data, stats):
test_loads = data['test_loads']
frequencies = data['frequencies']
mean = stats['mean']
CV = stats['CV']
bias = stats['bias']
RMSE = stats['RMSE']
# print the RMSE, CV, and bias for each test capacitor and frequency combination
for i, (channel, C_device) in enumerate(test_loads):
print "\n%.2f pF" % (C_device*1e12)
for j in range(len(frequencies)):
print "%.1fkHz: mean(C)=%.2f pF, RMSE=%.1f%%, CV=%.1f%%, bias=%.1f%%" % (frequencies[j]/1e3,
1e12*mean[j,i],
RMSE[j,i],
CV[j,i],
bias[j,i])
print
def plot_measured_vs_nominal_capacitance_for_each_frequency(data, stats):
# plot the measured vs nominal capacitance for each frequency
frequencies = data['frequencies']
test_loads = data['test_loads']
mean_C = stats['mean']
std_C = stats['std']
for i in range(len(frequencies)):
plt.figure()
plt.title('(frequency=%.2fkHz)' % (frequencies[i]/1e3))
for j, (channel, C_device) in enumerate(test_loads):
plt.errorbar(C_device, mean_C[i,j],
std_C[i,j], fmt='k')
C_device = np.array([x for channel, x in test_loads])
plt.loglog(C_device, C_device, 'k:')
plt.xlim(min(C_device)*.9, max(C_device)*1.1)
plt.ylim(min(C_device)*.9, max(C_device)*1.1)
plt.xlabel('C$_{nom}$ (F)')
plt.ylabel('C$_{measured}$ (F)')
def plot_colormap(stats, column, axis=None, fig=None):
    freq_vs_C_rmse = (stats.reindex_axis(
        pd.Index([(i, j) for i in stats.index.levels[0]
                  for j in stats.index.levels[1]],
                 name=['test_capacitor', 'frequency']))
        .reset_index()
        .pivot(index='frequency', columns='test_capacitor', values=column))
if axis is None:
fig = plt.figure()
axis = fig.add_subplot(111)
frequencies = stats.index.levels[1]
axis.set_xlabel('Capacitance')
axis.set_ylabel('Frequency')
vmin = freq_vs_C_rmse.fillna(0).values.min()
vmax = freq_vs_C_rmse.fillna(0).values.max()
if vmin < 0:
vmax = np.abs([vmin, vmax]).max()
vmin = -vmax
cmap=plt.cm.coolwarm
else:
vmin = 0
cmap=plt.cm.Reds
mesh = axis.pcolormesh(freq_vs_C_rmse.fillna(0).values, vmin=vmin,
vmax=vmax, cmap=cmap)
if fig is not None:
fig.colorbar(mesh)
else:
plt.colorbar()
axis.set_xticks(np.arange(freq_vs_C_rmse.shape[1]) + 0.5)
axis.set_xticklabels(["%.1fpF" % (c*1e12)
for c in freq_vs_C_rmse.columns],
rotation=90)
axis.set_yticks(np.arange(len(frequencies)) + 0.5)
axis.set_yticklabels(["%.2fkHz" % (f / 1e3) for f in frequencies])
axis.set_xlim(0, freq_vs_C_rmse.shape[1])
axis.set_ylim(0, freq_vs_C_rmse.shape[0])
return axis
def plot_stat_summary(df, fig=None):
'''
Plot stats grouped by test capacitor load _and_ frequency.
In other words, we calculate the mean of all samples in the data
frame for each test capacitance and frequency pairing, plotting
the following stats:
- Root mean squared error
- Coefficient of variation
- Bias
## [Coefficient of variation][1] ##
> In probability theory and statistics, the coefficient of
> variation (CV) is a normalized measure of dispersion of a
> probability distribution or frequency distribution. It is defined
> as the ratio of the standard deviation to the mean.
[1]: http://en.wikipedia.org/wiki/Coefficient_of_variation
'''
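    # Worked example for the CV (illustrative numbers only): measured
    # capacitances of 9, 10 and 11 pF give mean = 10 pF and (sample) std = 1 pF,
    # so CV = std / mean = 10 %, which is what the 'cv %' column stores.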
if fig is None:
fig = plt.figure(figsize=(8, 8))
# Define a subplot layout, 3 rows, 2 columns
grid = GridSpec(3, 2)
stats = calculate_stats(df, groupby=['test_capacitor',
'frequency']).dropna()
for i, stat in enumerate(['RMSE %', 'cv %', 'bias %']):
axis = fig.add_subplot(grid[i, 0])
axis.set_title(stat)
# Plot a colormap to show how the statistical value changes
# according to frequency/capacitance pairs.
plot_colormap(stats, stat, axis=axis, fig=fig)
axis = fig.add_subplot(grid[i, 1])
axis.set_title(stat)
# Plot a histogram to show the distribution of statistical
# values across all frequency/capacitance pairs.
try:
axis.hist(stats[stat].values, bins=50)
except AttributeError:
print stats[stat].describe()
fig.tight_layout()
|
bsd-3-clause
|
dolel13/spyre
|
examples/sliders_examples.py
|
1
|
1198
|
from spyre import server
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
class SlidersApp(server.App):
title = "Decaying Sine Wave"
inputs =[{ "input_type":'slider',
"label": 'Frequency',
"min" : 1,
"max" : 100,
"value" : 50,
"variable_name": 'freq',
"action_id": 'plot'},
{ "input_type":'slider',
"label": 'Decay Rate',
"min" : 0,
"max" : 2,
"step" : 0.01,
"value" : 0.5,
"variable_name": 'decay',
"action_id": 'plot'}]
outputs = [{ "output_type" : "plot",
"output_id" : "plot",
"on_page_load" : True }]
def getPlot(self, params):
f = float(params['freq'])
d = float(params['decay'])
x = np.arange(0,6*pi,pi/50)
y1 = np.sin(f*x/(2*pi))
y2 = np.exp(-x*d)
y3 = np.sin(f*x/(2*pi))*np.exp(-x*d)
fig = plt.figure()
splt1 = fig.add_subplot(3,1,1)
splt1.plot(x,y1) # sine wave
splt1.axes.get_xaxis().set_visible(False)
splt2 = fig.add_subplot(3,1,2)
splt2.plot(x,y2) # exponential decay
splt2.axes.get_xaxis().set_visible(False)
splt3 = fig.add_subplot(3,1,3)
splt3.plot(x,y3) #sine wave decay
return fig
app = SlidersApp()
app.launch(port=9094)
|
mit
|
DJArmstrong/autovet
|
Features/old/Centroiding/scripts/old/analyse_neighbours.py
|
2
|
13354
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 20 17:44:11 2016
@author:
Maximilian N. Guenther
Battcock Centre for Experimental Astrophysics,
Cavendish Laboratory,
JJ Thomson Avenue
Cambridge CB3 0HE
Email: [email protected]
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import lightcurve_tools
import stacked_images
import pandas as pd
#from astropy.stats import LombScargle
#from scipy.signal import lombscargle
#import lightcurve_tools, get_scatter_color
import os
from helper_functions import mystr
def plot(dic, dic_nb, outdir, fieldname, obj_id, ngts_version, dt=0.01, show_plot=False):
figs = {}
figs[0] = plot_phasecurve_1_siderial_day( dic )
figs[1] = plot_detrending_steps_evaluation( dic, dic_nb, dt )
figs[2] = plot_neighbours_phasecurve_and_location( fieldname, ngts_version, dic, dic_nb, dt )
figs[3] = plot_hjd_curves( dic, dic_nb )
save_pdf( figs, outdir, fieldname, obj_id, ngts_version, show_plot )
###########################################################################
#::: save all plots in one pdf per target object
###########################################################################
def save_pdf( figs, outdir, fieldname, obj_id, ngts_version, show_plot ):
outfilename = os.path.join( outdir, fieldname + '_' + obj_id + '_' + ngts_version + '_centroid_appendix.pdf' )
with PdfPages( outfilename ) as pdf:
for ind in figs:
pdf.savefig( figs[ind] )
print 'Plots saved as ' + outfilename
if show_plot == False: plt.close('all')
def plot_detrending_steps_evaluation( dic, dic_nb, dt ):
fig, axes = plt.subplots(1,2,figsize=(16,16))
offset = 0.02
xtext = -0.25
ytext = 0.01
#::: phasefolded curves
centdx = dic['CENTDX']
centdy = dic['CENTDY']
plot_phasecurves(dic, dt, centdx, centdy, color='g', axes=axes, offset=0*offset )
axes[0].text( xtext, ytext-0*offset, 'target (raw)')
centdx = dic['CENTDX_f']
centdy = dic['CENTDY_f']
plot_phasecurves( dic, dt, centdx, centdy, color='orange', axes=axes, offset=1*offset )
axes[0].text( xtext, ytext-1*offset, 'target (flattened externally)')
centdx = np.nanmean( dic_nb['CENTDX'], axis=0 )
centdy = np.nanmean( dic_nb['CENTDY'], axis=0 )
plot_phasecurves( dic, dt, centdx, centdy, color='b', axes=axes, offset=2*offset )
axes[0].text( xtext, ytext-2*offset, 'neighbours (mean of all)')
centdx = dic['CENTDX_f'] - np.nanmean( dic_nb['CENTDX'], axis=0 )
centdy = dic['CENTDY_f'] - np.nanmean( dic_nb['CENTDY'], axis=0 )
plot_phasecurves( dic, dt, centdx, centdy, color='orange', axes=axes, offset=3*offset )
axes[0].text( xtext, ytext-3*offset, 'target - neighbours (mean of all)')
centdx = dic_nb['CENTDX_ref_mean']
centdy = dic_nb['CENTDY_ref_mean']
plot_phasecurves( dic, dt, centdx, centdy, color='b', axes=axes, offset=4*offset )
axes[0].text( xtext, ytext-4*offset, 'reference stars (mean of best fit)')
centdx = dic['CENTDX_fd']
centdy = dic['CENTDY_fd']
plot_phasecurves( dic, dt, centdx, centdy, color='orange', axes=axes, offset=5*offset )
axes[0].text( xtext, ytext-5*offset, 'target (flattened and detrended, best fit)')
centdx = dic['CENTDX_fda']
centdy = dic['CENTDY_fda']
plot_phasecurves( dic, dt, centdx, centdy, color='orange', axes=axes, offset=6*offset )
axes[0].text( xtext, ytext-6*offset, 'target (flattened, detrended and 1 day siderial airmass correction)')
axes[0].set( xlim=[-0.25,0.75], ylim=[-0.02-6*offset,0.02] )
axes[1].set( xlim=[-0.25,0.75], ylim=[-0.02-6*offset,0.02] )
plt.tight_layout()
return fig
def plot_hjd_curves( dic, dic_nb ):
#::: set y offsets
offset = 0.1
#::: set plotting range
N_points = len(dic['HJD_BIN'])
# N_points = 1000
#::: set x-axis
x = np.arange(N_points)
# x = dic['HJD_BIN'][slice(None)]
# print '***********************************'
# print x
# print dic_nb['CENTDX_ref_mean_BIN'][slice(None)]
# print '***********************************'
# print x.shape
# print dic_nb['CENTDX_ref_mean_BIN'][slice(None)].shape
# print '***********************************'
# print dic['SYSREM_FLUX3'].shape
# print dic_nb['CENTDX'].shape
# print dic_nb['CENTDY'].shape
#::: set scatter color
c = dic['COLOR_BIN']
cmap = 'jet'
#::: plot
fig, axes = plt.subplots(4,1, sharex=True, sharey=False, figsize=(100,16))
texts = ['raw','reference stars','flattened + detrended','siderial day airmass correction', 'result']
ax = axes[0]
ax.scatter( x, dic['SYSREM_FLUX3_BIN'][slice(None)], c=c, rasterized=True, cmap=cmap )
ax.set( ylabel='FLUX (BINNED)' )
ax = axes[1]
ax.scatter( x, dic['CENTDX_f_BIN'][slice(None)], c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.scatter( x, dic_nb['CENTDX_ref_mean_BIN'][slice(None)] - offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.scatter( x, dic['CENTDX_fd_BIN'][slice(None)] - 2*offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
for i, text in enumerate( texts ):
ax.text( x[0], -i*offset, text )
ax.axhline( -i*offset, color='k' )
ax.scatter( x, dic['poly_CENTDX_BIN'][slice(None)] - 3*offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.scatter( x, dic['CENTDX_fda_BIN'][slice(None)] - 4*offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.set( ylim=[-0.1-4*offset,0.1], ylabel='CENTDX (BINNED)' )
ax = axes[2]
ax.scatter( x, dic['CENTDY_f_BIN'][slice(None)], c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.scatter( x, dic_nb['CENTDY_ref_mean_BIN'][slice(None)] - offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.scatter( x, dic['CENTDY_fd_BIN'][slice(None)] - 2*offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
for i, text in enumerate( texts ):
ax.text( x[0], -i*offset, text )
ax.axhline( -i*offset, color='k' )
ax.scatter( x, dic['poly_CENTDY_BIN'][slice(None)] - 3*offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.scatter( x, dic['CENTDY_fda_BIN'][slice(None)] - 4*offset, c=c, rasterized=True, cmap=cmap, vmin=-1, vmax=1 )
ax.set( ylim=[-0.1-4*offset,0.1], ylabel='CENTDY (BINNED)' )
ax = axes[3]
ax.scatter( x, dic['AIRMASS_BIN'][slice(None)], c=c, rasterized=True, cmap=cmap )
ax.set( ylim=[1.,2.], ylabel='AIRMASS (BINNED)' )
# ax = axes[4]
# ax.scatter( x, dic['COLOR_BIN'][slice(None)], c=c, rasterized=True, cmap=cmap )
# ax.set( ylim=[-1.,1.], ylabel='COLOR_BIN' )
ax.set( xlim=[x[0],x[-1]] )
plt.tight_layout()
return fig
def plot_phasecurve_1_siderial_day( dic ):
'''
1 mean siderial day =
( 23 + 56/60. + 4.0916/3600. ) / 24. = 0.9972695787 days
see e.g. https://en.wikipedia.org/wiki/Sidereal_time
Note: dic['poly_CENTDX'] is a function!
'''
#::: show airmass as proof of concept for the siderial day phase folding
# fig, axes = plt.subplots(1,2,figsize=(16,8))
#
# axes[0].scatter( dic['HJD_PHASE_1sidday'], dic['COLOR_PHASE_1sidday'], c=dic['COLOR_PHASE_1sidday'], rasterized=True, cmap='jet', vmin=-1, vmax=1)
# axes[0].set( ylim=[-1.,1.] )
#
# axes[1].scatter( dic['HJD_PHASE_1sidday'], dic['AIRMASS_PHASE_1sidday'], c=dic['COLOR_PHASE_1sidday'], rasterized=True, cmap='jet', vmin=-1, vmax=1)
# axes[1].set( ylim=[1.,2.] )
#::: show FLUX and CENTDXY
fig, axes = plt.subplots(3,1,figsize=(8,6), sharex=True)
# axes[0].scatter( dic['PHI'][::10], dic['CENTDX_fd'][::10], c=dic['COLOR'][::10], rasterized=True, cmap='jet' )
axes[0].scatter( dic['HJD_PHASE_1sidday'], dic['CENTDX_fd_PHASE_1sidday'], c=dic['COLOR_PHASE_1sidday'], rasterized=True, cmap='jet', vmin=-1, vmax=1 )
# axes[0].errorbar( dic['HJD_PHASE_1sidday'], dic['CENTDX_fd_PHASE_1sidday'], yerr=dic['CENTDX_fd_PHASE_1sidday_ERR'], fmt='.', color='k' )
axes[1].scatter( dic['HJD_PHASE_1sidday'], dic['CENTDY_fd_PHASE_1sidday'], c=dic['COLOR_PHASE_1sidday'], rasterized=True, cmap='jet', vmin=-1, vmax=1 )
# axes[1].errorbar( dic['HJD_PHASE_1sidday'], dic['CENTDY_fd_PHASE_1sidday'], yerr=dic['CENTDY_fd_PHASE_1sidday_ERR'], fmt='.', color='k' )
axes[2].scatter( dic['HJD_PHASE_1sidday'], dic['SYSREM_FLUX3_PHASE_1sidday'], c=dic['COLOR_PHASE_1sidday'], rasterized=True, cmap='jet', vmin=-1, vmax=1 )
# axes[2].errorbar( dic['HJD_PHASE_1sidday'], dic['SYSREM_FLUX3_PHASE_1sidday'], yerr=dic['SYSREM_FLUX3_PHASE_1sidday_ERR'], fmt='.', color='k' )
#::: show FLUX and CENTDXY trends / polyfits
axes[0].plot( dic['HJD_PHASE_1sidday'], dic['polyfct_CENTDX'](dic['HJD_PHASE_1sidday']), 'r-' )
axes[0].scatter( dic['HJD_PHASE_1sidday'], dic['CENTDX_fd_PHASE_1sidday'] - dic['polyfct_CENTDX'](dic['HJD_PHASE_1sidday']), c='r', rasterized=True )
axes[1].plot( dic['HJD_PHASE_1sidday'], dic['polyfct_CENTDY'](dic['HJD_PHASE_1sidday']), 'r-' )
axes[1].scatter( dic['HJD_PHASE_1sidday'], dic['CENTDY_fd_PHASE_1sidday'] - dic['polyfct_CENTDY'](dic['HJD_PHASE_1sidday']), c='r', rasterized=True )
plt.tight_layout()
return fig
def plot_neighbours_phasecurve_and_location( fieldname, ngts_version, dic, dic_nb, dt ):
N_nb = len(dic_nb['OBJ_ID'])
fig, axes = plt.subplots(N_nb, 5, figsize=(20,N_nb*4))
for i in range(N_nb):
centdx = dic_nb['CENTDX'][i,:]
centdy = dic_nb['CENTDY'][i,:]
plot_phasecurves_extended( fieldname, ngts_version, dic, dic_nb, i, dt, centdx, centdy, axes=axes[i,:])
plt.tight_layout()
return fig
def plot_phasecurves( dic, dt, centdx, centdy, title=None, color='b', axes=None, offset=None ):
hjd_phase, centdx_c_phase, centdx_c_phase_err, _, _ = lightcurve_tools.phase_fold( dic['HJD'], centdx - np.nanmean(centdx), dic['PERIOD'], dic['EPOCH'], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
hjd_phase, centdy_c_phase, centdy_c_phase_err, _, _ = lightcurve_tools.phase_fold( dic['HJD'], centdy - np.nanmean(centdy), dic['PERIOD'], dic['EPOCH'], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)
if offset is not None:
centdx_c_phase -= offset
centdy_c_phase -= offset
if axes is None:
fig, axes = plt.subplots(1,2, sharex=True, sharey=True, figsize=(12,4))
axes[0].errorbar( hjd_phase, centdx_c_phase, yerr=centdx_c_phase_err, fmt='o', color=color, rasterized=True ) #, color='darkgrey')
axes[0].set_ylabel('CENTDX (in pixel)')
# axes[0].set_ylim([ np.min(centdx_c_phase - centdx_c_phase_err), np.max(centdx_c_phase + centdx_c_phase_err) ])
axes[1].errorbar( hjd_phase, centdy_c_phase, yerr=centdy_c_phase_err, fmt='o', color=color, rasterized=True ) #, color='darkgrey')
axes[1].set_ylabel('CENTDY (in pixel)')
# axes[1].set_ylim([ np.min(centdy_c_phase - centdy_c_phase_err), np.max(centdy_c_phase + centdy_c_phase_err) ])
axes[0].set( xlim=[-0.25,0.75], ylim=[-0.02,0.02] )
axes[1].set( xlim=[-0.25,0.75], ylim=[-0.02,0.02] )
if title is not None:
plt.suptitle( title )
def plot_phasecurves_extended( fieldname, ngts_version, dic, dic_nb, i, dt, centdx, centdy, color='b', axes=None ):
if axes is None:
fig, axes = plt.subplots(1,5, figsize=(20,4))
plot_phasecurves( dic, dt, centdx, centdy, color='b', axes=axes )
axes[2].plot( dic['CCDX'][0], dic['CCDY'][0], 'bo', ms=12 )
axes[2].plot( dic_nb['CCDX_0'], dic_nb['CCDY_0'], 'k.' )
axes[2].plot( dic_nb['CCDX_0'][i], dic_nb['CCDY_0'][i], 'ro', ms=12 )
axes[2].set( xlim=[ dic['CCDX'][0]-150, dic['CCDX'][0]+150 ], ylim=[ dic['CCDY'][0]-150, dic['CCDY'][0]+150 ] )
stacked_images.plot(fieldname, ngts_version, dic_nb['CCDX_0'][i], dic_nb['CCDY_0'][i], r=15, ax=axes[3], show_apt=True, show_cbar=True)
plot_neighbour_info_text(axes[4], dic, dic_nb, i)
###########################################################################
#::: plot info page
###########################################################################
def plot_neighbour_info_text(ax, dic, dic_nb, i):
ax.set_xlim([0,1])
ax.set_ylim([0,1])
ax.axis('off')
ax.text(0,1.0,'OBJ_ID: '+dic_nb['OBJ_ID'][i])
ax.text(0,0.9,'FLUX: '+mystr(dic_nb['FLUX_MEAN'][i],2))
ax.text(0,0.8,'CCDX_0: '+mystr(dic_nb['CCDX_0'][i],2))
ax.text(0,0.7,'CCDY_0: '+mystr(dic_nb['CCDY_0'][i],2))
ax.text(0,0.6,'CCD distance: '+mystr(np.sqrt( (dic['CCDX'][0] - dic_nb['CCDX_0'][i])**2 + (dic['CCDY'][0] - dic_nb['CCDY_0'][i])**2 ),2))
ax.text(0,0.5,'CCD_X distance: '+mystr(( dic['CCDX'][0] - dic_nb['CCDX_0'][i] ),2))
    ax.text(0,0.4,'CCD_Y distance: '+mystr(( dic['CCDY'][0] - dic_nb['CCDY_0'][i] ),2))
ax.text(0,0.3,'B-V color: '+mystr(dic_nb['B-V'][i],2))
ax.text(0,0.2,'B-V color difference: '+mystr(dic['B-V'] - dic_nb['B-V'][i],2))
ax.text(0,0.1,'V Mag: '+mystr(dic_nb['Vmag'][i],2))
    ax.text(0,0.0,'Corr Coeff X / Y: '+mystr(dic_nb['corrcoeff_x'][i],2) + ' / ' + mystr(dic_nb['corrcoeff_y'][i],2))  # assumes a 'corrcoeff_y' key alongside 'corrcoeff_x'
|
gpl-3.0
|
bd-j/bsfh
|
prospect/io/read_results.py
|
3
|
20576
|
import sys, os
from copy import deepcopy
import warnings
import pickle, json
import numpy as np
try:
import h5py
except:
pass
try:
from sedpy.observate import load_filters
except:
pass
"""Convenience functions for reading and reconstructing results from a fitting
run, including reconstruction of the model for making posterior samples
"""
__all__ = ["results_from", "emcee_restarter",
"get_sps", "get_model",
"traceplot", "subcorner",
"compare_paramfile"]
def unpick(pickled):
"""create a serialized object that can go into hdf5 in py2 and py3, and can be read by both
"""
try:
obj = pickle.loads(pickled, encoding='bytes')
except(TypeError):
obj = pickle.loads(pickled)
return obj
def results_from(filename, model_file=None, dangerous=True, **kwargs):
"""Read a results file with stored model and MCMC chains.
:param filename:
Name and path to the file holding the results. If ``filename`` ends in
"h5" then it is assumed that this is an HDF5 file, otherwise it is
assumed to be a pickle.
:param dangerous: (default, True)
If True, use the stored paramfile text to import the parameter file and
reconstitute the model object. This executes code in the stored
paramfile text during import, and is therefore dangerous.
:returns results:
A dictionary of various results including:
+ `"chain"` - Samples from the posterior probability (ndarray).
+ `"lnprobability"` - The posterior probability of each sample.
+ `"weights"` - The weight of each sample, if `dynesty` was used.
+ `"theta_labels"` - List of strings describing free parameters.
+ `"bestfit"` - The prediction of the data for the posterior sample with
the highest `"lnprobability"`, as a dictionary.
+ `"run_params"` - A dictionary of arguments supplied to prospector at
the time of the fit.
+ `"paramfile_text"` - Text of the file used to run prospector, string
:returns obs:
The obs dictionary
:returns model:
The models.SedModel() object, if it could be regenerated from the stored
`"paramfile_text"`. Otherwise, `None`.
"""
# Read the basic chain, parameter, and run_params info
if filename.split('.')[-1] == 'h5':
res = read_hdf5(filename, **kwargs)
if "_mcmc.h5" in filename:
mf_default = filename.replace('_mcmc.h5', '_model')
else:
mf_default = "x"
else:
with open(filename, 'rb') as rf:
res = pickle.load(rf)
mf_default = filename.replace('_mcmc', '_model')
# Now try to read the model object itself from a pickle
if model_file is None:
mname = mf_default
else:
mname = model_file
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
model, powell_results = read_model(mname, param_file=param_file,
dangerous=dangerous, **kwargs)
if dangerous:
try:
model = get_model(res)
except:
model = None
res['model'] = model
if powell_results is not None:
res["powell_results"] = powell_results
return res, res["obs"], model
def emcee_restarter(restart_from="", niter=32, **kwargs):
"""Get the obs, model, and sps objects from a previous run, as well as the
run_params and initial positions (which are determined from the end of the
last run, and inserted into the run_params dictionary)
:param restart_from:
Name of the file to restart the sampling from. An error is raised if
this does not include an emcee style chain of shape (nwalker, niter,
ndim)
:param niter: (default: 32)
        Number of additional iterations to do (added to run_params)
:returns obs:
The `obs` dictionary used in the last run.
:returns model:
The model object used in the last run.
:returns sps:
The `sps` object used in the last run.
:returns noise:
A tuple of (None, None), since it is assumed the noise model in the
last run was trivial.
:returns run_params:
A dictionary of parameters controlling the operation. This is the same
as used in the last run, but with the "niter" key changed, and a new
"initial_positions" key that gives the ending positions of the emcee
walkers from the last run. The filename from which the run is
restarted is also stored in the "restart_from" key.
"""
result, obs, model = results_from(restart_from)
noise = (None, None)
# check for emcee style outputs
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
msg = "Result file {} does not have a chain of the proper shape."
assert is_emcee, msg.format(restart_from)
sps = get_sps(result)
run_params = deepcopy(result["run_params"])
run_params["niter"] = niter
run_params["restart_from"] = restart_from
initial_positions = result["chain"][:, -1, :]
run_params["initial_positions"] = initial_positions
return obs, model, sps, noise, run_params
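# Hedged restart sketch (the file name is hypothetical):
#
#     obs, model, sps, noise, run_params = emcee_restarter("demo_run_mcmc.h5",
#                                                          niter=64)
#     # run_params["initial_positions"] now holds the final walker positions.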
def read_model(model_file, param_file=('', ''), dangerous=False, **extras):
"""Read the model pickle. This can be difficult if there are user defined
functions that have to be loaded dynamically. In that case, import the
string version of the paramfile and *then* try to unpickle the model
object.
:param model_file:
String, name and path to the model pickle.
:param dangerous: (default: False)
If True, try to import the given paramfile.
:param param_file:
2-element tuple. The first element is the name of the paramfile, which
will be used to set the name of the imported module. The second
element is the param_file contents as a string. The code in this
string will be imported.
"""
model = powell_results = None
if os.path.exists(model_file):
try:
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
except(AttributeError):
# Here one can deal with module and class names that changed
with open(model_file, 'rb') as mf:
mod = load(mf)
except(ImportError, KeyError):
# here we load the parameter file as a module using the stored
# source string. Obviously this is dangerous as it will execute
# whatever is in the stored source string. But it can be used to
            # recover functions (especially dependency functions) that are user
# defined
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
if dangerous:
user_module = import_module_from_string(param_file[1], modname)
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
model = mod['model']
for k, v in list(model.theta_index.items()):
if type(v) is tuple:
model.theta_index[k] = slice(*v)
powell_results = mod['powell']
return model, powell_results
def read_hdf5(filename, **extras):
"""Read an HDF5 file (with a specific format) into a dictionary of results.
This HDF5 file is assumed to have the groups ``sampling`` and ``obs`` which
respectively contain the sampling chain and the observational data used in
the inference.
All attributes of these groups as well as top-level attributes are loaded
into the top-level of the dictionary using ``json.loads``, and therefore
must have been written with ``json.dumps``. This should probably use
JSONDecoders, but who has time to learn that.
:param filename:
Name of the HDF5 file.
"""
groups = {"sampling": {}, "obs": {},
"bestfit": {}, "optimization": {}}
res = {}
with h5py.File(filename, "r") as hf:
# loop over the groups
for group, d in groups.items():
# check the group exists
if group not in hf:
continue
# read the arrays in that group into the dictionary for that group
for k, v in hf[group].items():
d[k] = np.array(v)
# unserialize the attributes and put them in the dictionary
for k, v in hf[group].attrs.items():
try:
d[k] = json.loads(v)
except:
try:
d[k] = unpick(v)
except:
d[k] = v
# do top-level attributes.
for k, v in hf.attrs.items():
try:
res[k] = json.loads(v)
except:
try:
res[k] = unpick(v)
except:
res[k] = v
res.update(groups['sampling'])
res["bestfit"] = groups["bestfit"]
res["optimization"] = groups["optimization"]
res['obs'] = groups['obs']
try:
res['obs']['filters'] = load_filters([str(f) for f in res['obs']['filters']])
except:
pass
try:
res['rstate'] = unpick(res['rstate'])
except:
pass
#try:
# mp = [names_to_functions(p.copy()) for p in res['model_params']]
# res['model_params'] = mp
#except:
# pass
return res
def read_pickles(filename, **kwargs):
"""Alias for backwards compatability. Calls `results_from()`.
"""
return results_from(filename, **kwargs)
def get_sps(res):
"""This gets exactly the SPS object used in the fiting (modulo any
changes to FSPS itself).
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_sps` method defined in the
paramfile module.
:param res:
A results dictionary (the output of `results_from()`)
:returns sps:
An sps object (i.e. from prospect.sources)
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
sps = user_module.load_sps(**res['run_params'])
except(AttributeError):
sps = user_module.build_sps(**res['run_params'])
# Now check that the SSP libraries are consistent
flib = res['run_params'].get('sps_libraries', None)
try:
rlib = sps.ssp.libraries
except(AttributeError):
rlib = None
if (flib is None) or (rlib is None):
warnings.warn("Could not check SSP library versions.")
else:
liberr = ("The FSPS libraries used in fitting({}) are not the "
"same as the FSPS libraries that you are using now ({})".format(flib, rlib))
# If fitting and reading in are happening in different python versions,
# ensure string comparison doesn't throw error:
        if isinstance(flib[0], bytes):
            flib = [i.decode() for i in flib]
        if isinstance(rlib[0], bytes):
            rlib = [i.decode() for i in rlib]
assert (flib[0] == rlib[0]) and (flib[1] == rlib[1]), liberr
return sps
def get_model(res):
"""This gets exactly the model object used in the fiting.
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_model` method defined in the
paramfile module, with `run_params` dictionary passed to it.
:param res:
A results dictionary (the output of `results_from()`)
:returns model:
A prospect.models.SedModel object
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
model = user_module.load_model(**res['run_params'])
except(AttributeError):
model = user_module.build_model(**res['run_params'])
return model
def import_module_from_string(source, name, add_to_sys_modules=True):
"""Well this seems dangerous.
"""
import imp
user_module = imp.new_module(name)
exec(source, user_module.__dict__)
if add_to_sys_modules:
sys.modules[name] = user_module
return user_module
def traceplot(results, showpars=None, start=0, chains=slice(None),
figsize=None, truths=None, **plot_kwargs):
"""Plot the evolution of each parameter value with iteration #, for each
walker in the chain.
:param results:
A Prospector results dictionary, usually the output of
``results_from('resultfile')``.
:param showpars: (optional)
A list of strings of the parameters to show. Defaults to all
parameters in the ``"theta_labels"`` key of the ``sample_results``
dictionary.
:param chains:
If results are from an ensemble sampler, setting `chain` to an integer
array of walker indices will cause only those walkers to be used in
        generating the plot. Useful for keeping the plot from getting too cluttered.
:param start: (optional, default: 0)
Integer giving the iteration number from which to start plotting.
:param **plot_kwargs:
Extra keywords are passed to the
``matplotlib.axes._subplots.AxesSubplot.plot()`` method.
:returns tracefig:
A multipaneled Figure object that shows the evolution of walker
positions in the parameters given by ``showpars``, as well as
ln(posterior probability)
"""
import matplotlib.pyplot as pl
# Get parameter names
try:
parnames = np.array(results['theta_labels'])
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([p in showpars for p in parnames], dtype=bool)
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, lnp, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start:, :]
lnp = np.atleast_2d(results['lnprobability'])[chains, start:]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start:]
nwalk = trace.shape[0]
# Set up plot windows
ndim = len(parnames) + 1
nx = int(np.floor(np.sqrt(ndim)))
ny = int(np.ceil(ndim * 1.0 / nx))
sz = np.array([nx, ny])
factor = 3.0 # size of one side of one panel
lbdim = 0.2 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 * factor # w/hspace size
plotdim = factor * sz + factor * (sz - 1) * whspace
dim = lbdim + plotdim + trdim
if figsize is None:
fig, axes = pl.subplots(nx, ny, figsize=(dim[1], dim[0]), sharex=True)
else:
fig, axes = pl.subplots(nx, ny, figsize=figsize, sharex=True)
axes = np.atleast_2d(axes)
#lb = lbdim / dim
#tr = (lbdim + plotdim) / dim
#fig.subplots_adjust(left=lb[1], bottom=lb[0], right=tr[1], top=tr[0],
# wspace=whspace, hspace=whspace)
# Sequentially plot the chains in each parameter
for i in range(ndim - 1):
ax = axes.flat[i]
for j in range(nwalk):
ax.plot(trace[j, :, i], **plot_kwargs)
ax.set_title(parnames[i], y=1.02)
# Plot lnprob
ax = axes.flat[-1]
for j in range(nwalk):
ax.plot(lnp[j, :], **plot_kwargs)
ax.set_title('lnP', y=1.02)
[ax.set_xlabel("iteration") for ax in axes[-1, :]]
#[ax.set_xticklabels('') for ax in axes[:-1, :].flat]
if truths is not None:
for i, t in enumerate(truths[ind_show]):
axes.flat[i].axhline(t, color='k', linestyle=':')
pl.tight_layout()
return fig
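# Illustrative plotting sketch (not part of the original module): the file
# name is hypothetical, and the parameter names must match entries in
# res["theta_labels"] for the restriction to have any effect.
def _example_traceplot():  # pragma: no cover
    res, obs, model = results_from("demo_run_mcmc.h5")
    fig = traceplot(res, showpars=["mass", "tau"])
    fig.savefig("demo_trace.png")
    return fig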
def param_evol(results, **kwargs):
"""Backwards compatability
"""
return traceplot(results, **kwargs)
def subcorner(results, showpars=None, truths=None,
start=0, thin=1, chains=slice(None),
logify=["mass", "tau"], **kwargs):
"""Make a triangle plot of the (thinned, latter) samples of the posterior
parameter space. Optionally make the plot only for a supplied subset of
the parameters.
:param showpars: (optional)
List of string names of parameters to include in the corner plot.
:param truths: (optional)
List of truth values for the chosen parameters.
:param start: (optional, default: 0)
The iteration number to start with when drawing samples to plot.
:param thin: (optional, default: 1)
The thinning of each chain to perform when drawing samples to plot.
:param chains: (optional)
If results are from an ensemble sampler, setting `chain` to an integer
array of walker indices will cause only those walkers to be used in
        generating the plot. Useful for removing stuck walkers.
:param kwargs:
Remaining keywords are passed to the ``corner`` plotting package.
:param logify:
A list of parameter names to plot in `log10(parameter)` instead of
`parameter`
"""
    try:
        import corner as triangle
    except(ImportError):
        try:
            import triangle
        except(ImportError):
            raise ImportError("Please install the `corner` package.")
# pull out the parameter names and flatten the thinned chains
# Get parameter names
try:
parnames = np.array(results['theta_labels'], dtype='U20')
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([parnames.tolist().index(p) for p in showpars])
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start::thin, :]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start::thin]
samples = trace.reshape(trace.shape[0] * trace.shape[1], trace.shape[2])
# logify some parameters
xx = samples.copy()
if truths is not None:
xx_truth = np.array(truths).copy()
else:
xx_truth = None
for p in logify:
if p in parnames:
idx = parnames.tolist().index(p)
xx[:, idx] = np.log10(xx[:, idx])
parnames[idx] = "log({})".format(parnames[idx])
if truths is not None:
xx_truth[idx] = np.log10(xx_truth[idx])
# mess with corner defaults
corner_kwargs = {"plot_datapoints": False, "plot_density": False,
"fill_contours": True, "show_titles": True}
corner_kwargs.update(kwargs)
fig = triangle.corner(xx, labels=parnames, truths=xx_truth,
quantiles=[0.16, 0.5, 0.84], weights=wghts, **corner_kwargs)
return fig
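# Illustrative corner-plot sketch (not part of the original module): the file
# and parameter names are hypothetical and must exist in res["theta_labels"].
def _example_subcorner():  # pragma: no cover
    res, obs, model = results_from("demo_run_mcmc.h5")
    fig = subcorner(res, showpars=["mass", "tau", "dust2"], thin=5)
    fig.savefig("demo_corner.png")
    return fig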
def subtriangle(results, **kwargs):
"""Backwards compatability
"""
return subcorner(results, **kwargs)
def compare_paramfile(res, filename):
"""Compare the runtime parameter file text stored in the `res` dictionary
to the text of some existing file with fully qualified path `filename`.
"""
from pprint import pprint
from difflib import unified_diff
a = res["paramfile_text"]
aa = a.split('\n')
with open(filename, "r") as f:
b = json.dumps(f.read())
bbl = json.loads(b)
bb = bbl.split('\n')
pprint([l for l in unified_diff(aa, bb)])
def names_to_functions(p):
"""Replace names of functions (or pickles of objects) in a parameter
description with the actual functions (or pickles).
"""
from importlib import import_module
for k, v in list(p.items()):
try:
m = import_module(v[1])
f = m.__dict__[v[0]]
except:
try:
f = pickle.loads(v)
except:
f = v
p[k] = f
return p
|
gpl-2.0
|
brainwater/keras
|
tests/manual/check_callbacks.py
|
82
|
7540
|
import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
n_imgs, img_h, img_w = imgs.shape
if n_imgs != grid[0] * grid[1]:
        raise ValueError("Number of images (%d) does not match "
                         "the grid size %s" % (n_imgs, grid))
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
            combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[1] * i + j]
return combined
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
|
mit
|
niknow/scipy
|
scipy/cluster/hierarchy.py
|
18
|
95902
|
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
'median': 4, 'ward': 5, 'weighted': 6}
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
        cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
        gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``distance.pdist``
function for a list of valid distance metrics. A custom distance
function can also be used. See the ``distance.pdist`` function for
details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
Notes
-----
1. For method 'single' an optimized algorithm called SLINK is implemented,
which has :math:`O(n^2)` time complexity.
For methods 'complete', 'average', 'weighted' and 'ward' an algorithm
called nearest-neighbors chain is implemented, which too has time
complexity :math:`O(n^2)`.
For other methods a naive algorithm is implemented with :math:`O(n^3)`
time complexity.
All algorithms use :math:`O(n^2)` memory.
Refer to [1]_ for details about the algorithms.
2. Methods 'centroid', 'median' and 'ward' are correctly defined only if
Euclidean pairwise metric is used. If `y` is passed as precomputed
pairwise distances, then it is a user responsibility to assure that
these distances are in fact Euclidean, otherwise the produced result
will be incorrect.
References
----------
.. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
algorithms", `arXiv:1109.2378v1 <http://arxiv.org/abs/1109.2378v1>`_
, 2011.
"""
if method not in _LINKAGE_METHODS:
raise ValueError("Invalid method: {0}".format(method))
y = _convert_to_double(np.asarray(y, order='c'))
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
[y] = _copy_arrays_if_base_present([y])
elif y.ndim == 2:
if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
raise ValueError("Method '{0}' requires the distance metric "
"to be Euclidean".format(method))
y = distance.pdist(y, metric)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
n = int(distance.num_obs_y(y))
method_code = _LINKAGE_METHODS[method]
if method == 'single':
return _hierarchy.slink(y, n)
elif method in ['complete', 'average', 'weighted', 'ward']:
return _hierarchy.nn_chain(y, n, method_code)
else:
return _hierarchy.linkage(y, n, method_code)
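# Illustrative sketch (not part of the library source): build a linkage matrix
# from a small, made-up data set. Each row of Z is [idx0, idx1, dist, count]
# as described in the docstring above.
def _example_linkage():  # pragma: no cover
    np.random.seed(0)
    X = np.random.randn(6, 2)                 # 6 observations in 2 dimensions
    Z = linkage(X, method='average', metric='euclidean')
    print(Z.shape)                            # (5, 4): n-1 merges, 4 columns
    return Z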
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def __lt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist < node.dist
def __gt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist > node.dist
def __eq__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist == node.dist
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
        the target node. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def _order_cluster_tree(Z):
"""
Returns clustering nodes in bottom-up order by distance.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
Returns
-------
nodes : list
A list of ClusterNode objects.
"""
q = deque()
tree = to_tree(Z)
q.append(tree)
nodes = []
while q:
node = q.popleft()
if not node.is_leaf():
bisect.insort_left(nodes, node)
q.append(node.get_right())
q.append(node.get_left())
return nodes
def cut_tree(Z, n_clusters=None, height=None):
"""
Given a linkage matrix Z, return the cut tree.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
n_clusters : array_like, optional
Number of clusters in the tree at the cut point.
height : array_like, optional
The height at which to cut the tree. Only possible for ultrametric
trees.
Returns
-------
cutree : array
An array indicating group membership at each agglomeration step. I.e.,
for a full cut tree, in the first column each data point is in its own
cluster. At the next step, two nodes are merged. Finally all singleton
and non-singleton clusters are in one group. If `n_clusters` or
`height` is given, the columns correspond to the columns of `n_clusters` or
`height`.
Examples
--------
>>> from scipy import cluster
>>> np.random.seed(23)
>>> X = np.random.randn(50, 4)
>>> Z = cluster.hierarchy.ward(X)
>>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
>>> cutree[:10]
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[3, 4],
[2, 2],
[0, 0],
[1, 5],
[3, 6],
[4, 7]])
"""
nobs = num_obs_linkage(Z)
nodes = _order_cluster_tree(Z)
if height is not None and n_clusters is not None:
raise ValueError("At least one of either height or n_clusters "
"must be None")
elif height is None and n_clusters is None: # return the full cut tree
cols_idx = np.arange(nobs)
elif height is not None:
heights = np.array([x.dist for x in nodes])
cols_idx = np.searchsorted(heights, height)
else:
cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)
try:
n_cols = len(cols_idx)
except TypeError: # scalar
n_cols = 1
cols_idx = np.array([cols_idx])
groups = np.zeros((n_cols, nobs), dtype=int)
last_group = np.arange(nobs)
if 0 in cols_idx:
groups[0] = last_group
for i, node in enumerate(nodes):
idx = node.pre_order()
this_group = last_group.copy()
this_group[idx] = last_group[idx].min()
this_group[this_group > last_group[idx].max()] -= 1
if i + 1 in cols_idx:
groups[np.where(i + 1 == cols_idx)[0]] = this_group
last_group = this_group
return groups.T
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    tree : ClusterNode
        The root ClusterNode object, or a tuple ``(r, d)`` when ``rd`` is
        True (see the ``rd`` parameter above).
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# Number of original objects is equal to the number of rows minus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
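# Illustrative sketch (not part of the library source): convert a linkage
# matrix into a ClusterNode tree and list its leaves from left to right.
# The input data are made up for demonstration.
def _example_to_tree():  # pragma: no cover
    np.random.seed(0)
    Z = linkage(np.random.randn(5, 3), method='single')
    root = to_tree(Z)
    print(root.get_count())   # 5 original observations under the root
    print(root.pre_order())   # leaf ids in left-to-right order
    return root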
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see `linkage` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy**2
denomB = Zz**2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
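# Illustrative sketch (not part of the library source): compare the original
# pairwise distances with the cophenetic distances implied by the dendrogram.
# The data are made up for demonstration.
def _example_cophenet():  # pragma: no cover
    np.random.seed(0)
    X = np.random.randn(10, 3)
    y = distance.pdist(X)
    Z = linkage(y, method='average')
    c, coph_dists = cophenet(Z, y)
    print(c)                  # values near 1 indicate a faithful hierarchy
    return c, coph_dists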
def inconsistent(Z, d=2):
r"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
clustering). See `linkage` documentation for more information on its
form.
d : int, optional
The number of links up to `d` levels below each non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
statistics for the non-singleton cluster ``i``. The link statistics are
computed over the link heights for links :math:`d` levels below the
cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is the number
of links included in the calculation; and ``R[i,3]`` is the
inconsistency coefficient,
        .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
     * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
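# Illustrative sketch (not part of the library source): round-trip a linkage
# matrix through the MATLAB-style encoding. The data are made up.
def _example_mlab_roundtrip():  # pragma: no cover
    Z = linkage(np.random.randn(4, 2), method='complete')
    ZM = to_mlab_linkage(Z)       # drop the count column, shift indices to 1..N
    Z2 = from_mlab_linkage(ZM)    # restore 0-based indices and recompute counts
    print(np.allclose(Z, Z2))     # True: both encodings carry the same information
    return Z2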
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # We expect each value to be no smaller than its predecessor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional array (type double)
with :math:`n` rows and 4 columns. The first two columns must contain
indices between 0 and :math:`2n-1`. For a given row ``i``, the following
two expressions have to hold:
.. math::
0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
        0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
I.e. a cluster cannot join another cluster unless the cluster being joined
has been generated.
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
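Examples
--------
For instance, a linkage built from four toy observations reports four
original observations:
>>> from scipy.cluster.hierarchy import linkage, num_obs_linkage
>>> Z = linkage([[0.], [1.], [5.], [6.]], 'single')
>>> num_obs_linkage(Z)
4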
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
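Examples
--------
A quick sanity-check sketch using a condensed distance matrix from ``pdist``
(the observations are illustrative only):
>>> from scipy.spatial import distance
>>> from scipy.cluster.hierarchy import linkage, correspond
>>> Y = distance.pdist([[0., 0.], [1., 0.], [0., 1.], [4., 4.]])
>>> Z = linkage(Y, 'single')
>>> correspond(Z, Y)
True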
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, ``monocrit[i] >= monocrit[j]``.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
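Examples
--------
A minimal sketch with two well-separated toy groups; cutting at a cophenetic
distance of 1.0 yields two flat clusters:
>>> from scipy.cluster.hierarchy import linkage, fcluster
>>> Z = linkage([[0.], [0.2], [10.], [10.2]], 'single')
>>> T = fcluster(Z, t=1.0, criterion='distance')
>>> len(set(T))
2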
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
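Examples
--------
A small sketch on illustrative 2-D observations, cutting at distance 1.0:
>>> from scipy.cluster.hierarchy import fclusterdata
>>> X = [[0., 0.], [0.3, 0.], [10., 10.], [10.3, 10.]]
>>> T = fclusterdata(X, t=1.0, criterion='distance')
>>> len(set(T))
2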
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
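Examples
--------
Each original observation appears exactly once in the returned traversal
(toy data shown for illustration):
>>> from scipy.cluster.hierarchy import linkage, leaves_list
>>> Z = linkage([[0.], [10.], [11.]], 'single')
>>> sorted(leaves_list(Z).tolist())
[0, 1, 2]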
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot "
"the dendrogram. Use no_plot=True to calculate the "
"dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation in ('top', 'bottom'):
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
else:
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(iv_ticks)
if orientation == 'top':
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = float(_get_tick_rotation(len(ivl))) if (
leaf_rotation is None) else leaf_rotation
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation in ('left', 'right'):
if orientation == 'left':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
else:
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(iv_ticks)
if orientation == 'left':
ax.yaxis.set_ticks_position('right')
else:
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
# Let's use collections instead. This way there is a separate legend item
# for each tree grouping, rather than stupidly one for each line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there's a grouping of links above the color threshold, it goes last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
Ellipse = matplotlib.patches.Ellipse
for (x, y) in contraction_marks:
if orientation in ('left', 'right'):
e = Ellipse((y, x), width=dvw / 100, height=1.0)
else:
e = Ellipse((x, y), width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for use by dendrogram.
Note that this palette is global (i.e. setting it once changes the colors
for all subsequent calls to `dendrogram`) and that it affects only the
colors below ``color_threshold``.
Note that `dendrogram` also accepts a custom coloring function through its
``link_color_func`` keyword, which is more flexible and non-global.
Parameters
----------
palette : list of str or None
A list of matplotlib color codes. The order of the color codes is the
order in which the colors are cycled through when color thresholding in
the dendrogram.
If ``None``, resets the palette to its default (which is
``['g', 'r', 'c', 'm', 'y', 'k']``).
Returns
-------
None
See Also
--------
dendrogram
Notes
-----
Ability to reset the palette with ``None`` added in Scipy 0.17.0.
Examples
--------
>>> from scipy.cluster import hierarchy
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,
... 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['g', 'b', 'b', 'b', 'b']
>>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['c', 'b', 'b', 'b', 'b']
>>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
... above_threshold_color='k')
>>> dn['color_list']
['c', 'm', 'm', 'k', 'k']
Now reset the color palette to its default:
>>> hierarchy.set_link_color_palette(None)
"""
if palette is None:
# reset to its default
palette = ['g', 'r', 'c', 'm', 'y', 'k']
elif type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, leaf_font_size=None,
leaf_rotation=None, leaf_label_func=None,
show_contracted=False, link_color_func=None, ax=None,
above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton clusters formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
When leaf_label_func is a callable function, it is called
for each leaf with cluster index :math:`k < 2n-1`, and is
expected to return a string with the label for that
leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
See Also
--------
linkage, set_link_color_palette
Examples
--------
>>> from scipy.cluster import hierarchy
>>> import matplotlib.pyplot as plt
A very basic example:
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> plt.figure()
>>> dn = hierarchy.dendrogram(Z)
Now plot in given axes, improve the color scheme and use both vertical and
horizontal orientations:
>>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
>>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
>>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
... orientation='top')
>>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], above_threshold_color='#bcbddc',
... orientation='right')
>>> hierarchy.set_link_color_palette(None) # reset to default after use
>>> plt.show()
"""
# This feature was thought about but never implemented (still useful?):
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
ivl = [] # list of leaves
if color_threshold is None or (isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
# Empty list will be filled in _dendrogram_calculate_info
contraction_marks = [] if show_contracted else None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2*n - 2,
iv=0.0,
ivl=ivl,
n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list,
lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the ``max(Z[*,2]``) for all nodes ``*`` below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-single cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if 2 * n - p > i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
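Examples
--------
For example, relabelling the flat cluster ids does not change the partition:
>>> from scipy.cluster.hierarchy import is_isomorphic
>>> is_isomorphic([1, 1, 2, 2], [2, 2, 1, 1])
True
>>> is_isomorphic([1, 1, 2, 2], [1, 2, 2, 2])
False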
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
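Examples
--------
A small sketch on three toy 1-D observations (0, 1 and 10, single linkage):
>>> from scipy.cluster.hierarchy import linkage, maxdists
>>> Z = linkage([[0.], [1.], [10.]], 'single')
>>> maxdists(Z).tolist()
[1.0, 9.0]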
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
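Examples
--------
A minimal sketch, thresholding on the maximum inconsistency coefficient
(column 3) as described in `fcluster`; the data are illustrative only:
>>> from scipy.cluster.hierarchy import linkage, inconsistent, maxRstat, fcluster
>>> Z = linkage([[0.], [1.], [10.], [11.]], 'single')
>>> R = inconsistent(Z)
>>> MR = maxRstat(Z, R, 3)
>>> T = fcluster(Z, t=1.0, criterion='monocrit', monocrit=MR)
>>> len(T)
4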
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
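Examples
--------
A minimal sketch: the number of leaders equals the number of flat clusters
found by `fcluster` (toy observations shown for illustration):
>>> from scipy.cluster.hierarchy import linkage, fcluster, leaders
>>> Z = linkage([[0.], [1.], [10.], [11.]], 'single')
>>> T = fcluster(Z, t=2.0, criterion='distance')
>>> L, M = leaders(Z, T)
>>> len(L) == len(set(T))
True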
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
|
bsd-3-clause
|
jzt5132/scikit-learn
|
benchmarks/bench_isotonic.py
|
268
|
3046
|
"""
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
return np.random.randint(-50, 50, size=size) \
+ 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
|
bsd-3-clause
|
dpshelio/sunpy
|
sunpy/net/jsoc/jsoc.py
|
2
|
35152
|
# -*- coding: utf-8 -*-
import os
import time
import urllib
import warnings
from collections import Sequence
import drms
import numpy as np
import pandas as pd
import astropy.units as u
import astropy.time
import astropy.table
from astropy.utils.misc import isiterable
from parfive import Downloader, Results
from sunpy import config
from sunpy.net.base_client import BaseClient
from sunpy.net.attr import and_
from sunpy.net.jsoc.attrs import walker
from sunpy.util.exceptions import SunpyUserWarning
__all__ = ['JSOCClient', 'JSOCResponse']
PKEY_LIST_TIME = {'T_START', 'T_REC', 'T_OBS', 'MidTime', 'OBS_DATE',
'obsdate', 'DATE_OBS', 'starttime', 'stoptime', 'UTC_StartTime'}
class NotExportedError(Exception):
pass
class JSOCResponse(Sequence):
def __init__(self, table=None):
"""
table : `astropy.table.Table`
"""
self.table = table
self.query_args = None
self.requests = None
def __str__(self):
return str(self.table)
def __repr__(self):
return repr(self.table)
def _repr_html_(self):
return self.table._repr_html_()
def __len__(self):
if self.table is None:
return 0
else:
return len(self.table)
def __getitem__(self, item):
return type(self)(self.table[item])
def __iter__(self):
return (t for t in [self])
def append(self, table):
if self.table is None:
self.table = table
else:
self.table = astropy.table.vstack([self.table, table])
def response_block_properties(self):
"""
Returns a set of class attributes on all the response blocks.
"""
warnings.warn("The JSOC client does not support response block properties.", SunpyUserWarning)
return set()
class JSOCClient(BaseClient):
"""
This is a Client to the JSOC Data Export service.
It exposes a similar API to the VSO client, although the underlying model
is more complex. The JSOC stages data before you can download it, so a JSOC
query is a three stage process. First you query the JSOC for records,
a table of these records is returned. Then you can request these records to
be staged for download and then you can download them.
The last two stages of this process are bundled together into the `fetch()`
method, but they can be separated if you are performing a large or complex
query.
.. warning::
JSOC requires you to register your email address before requesting
data. `See this on how to register <http://jsoc.stanford.edu/ajax/register_email.html>`__.
Notes
-----
The full list of ``Series`` is available through this `site <http://jsoc.stanford.edu>`_.
JSOC requires a validated email address; you can pass in your validated email address
using the `~sunpy.net.jsoc.attrs.Notify` attribute. You have to register your email address
with JSOC beforehand `here <http://jsoc.stanford.edu/ajax/register_email.html>`_.
The backend of SunPy's JSOC Client uses the `drms package <https://github.com/sunpy/drms>`_.
The tutorials can be `found here <https://docs.sunpy.org/projects/en/stable/tutorial.html>`_.
This can be used to build complex queries, by directly inputting the query string.
Examples
--------
*Example 1*
Query JSOC for some HMI data at 45 second cadence::
>>> from sunpy.net import jsoc
>>> from sunpy.net import attrs as a
>>> client = jsoc.JSOCClient()
>>> response = client.search(a.Time('2014-01-01T00:00:00', '2014-01-01T00:10:00'),
... a.jsoc.Series('hmi.m_45s'), a.jsoc.Notify("[email protected]")) # doctest: +REMOTE_DATA
The response object holds the records that your query will return:
>>> print(response) # doctest: +ELLIPSIS +REMOTE_DATA
T_REC TELESCOP INSTRUME WAVELNTH CAR_ROT
----------------------- -------- ---------- -------- -------
2014.01.01_00:00:45_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:01:30_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:02:15_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:03:00_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:03:45_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:04:30_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:05:15_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:06:00_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:06:45_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:07:30_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:08:15_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:09:00_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:09:45_TAI SDO/HMI HMI_FRONT2 6173.0 2145
2014.01.01_00:10:30_TAI SDO/HMI HMI_FRONT2 6173.0 2145
You can then make the request and download the data::
>>> res = client.fetch(response) # doctest: +SKIP
This returns a Results instance which can be used to watch the progress
of the download.
Note
----
A registered email address is not required if you only need to query for data;
it is used only if you need to make an export request. For example::
>>> client = jsoc.JSOCClient() # doctest: +REMOTE_DATA
>>> response = client.search(a.Time('2014-01-01T00:00:00', '2014-01-01T00:10:00'),
... a.jsoc.Series('hmi.m_45s')) # doctest: +REMOTE_DATA
The above is a successful query operation, and will return query responses as before.
But, this response object cannot be used to make an export request and will throw an
error if done so::
>>> res = client.fetch(response) # doctest: +SKIP
ValueError: Email address is invalid or not registered
*Example 2*
Query the JSOC for some AIA 171 data, and separate out the staging and the
download steps::
>>> import astropy.units as u
>>> from sunpy.net import jsoc
>>> from sunpy.net import attrs as a
>>> client = jsoc.JSOCClient() # doctest: +REMOTE_DATA
>>> response = client.search(a.Time('2014/1/1T00:00:00', '2014/1/1T00:00:36'),
... a.jsoc.Series('aia.lev1_euv_12s'), a.jsoc.Segment('image'),
... a.jsoc.Wavelength(171*u.AA), a.jsoc.Notify("[email protected]")) # doctest: +REMOTE_DATA
The response object holds the records that your query will return:
>>> print(response) # doctest: +REMOTE_DATA
T_REC TELESCOP INSTRUME WAVELNTH CAR_ROT
-------------------- -------- -------- -------- -------
2014-01-01T00:00:01Z SDO/AIA AIA_3 171 2145
2014-01-01T00:00:13Z SDO/AIA AIA_3 171 2145
2014-01-01T00:00:25Z SDO/AIA AIA_3 171 2145
2014-01-01T00:00:37Z SDO/AIA AIA_3 171 2145
You can then make the request::
>>> requests = client.request_data(response) # doctest: +SKIP
This returns a list of all the ExportRequest objects for your query. You can
get the ExportRequest ID ::
>>> requests.id # doctest: +SKIP
'JSOC_20171205_372'
You can also check the status of the request, which will print out a status
message and return the status code: a code of 1 means it is not ready
to download and a code of 0 means the request is staged and ready. A code
of 6 means an error, which commonly means the request has not had time to
get into the queue::
>>> requests.status # doctest: +SKIP
0
Once the status code is 0 you can download the data using the `get_request`
method::
>>> res = client.get_request(requests) # doctest: +SKIP
This returns a Results instance which can be used to watch the progress
of the download::
>>> res.wait(progress=True) # doctest: +SKIP
"""
def search(self, *query, **kwargs):
"""
Build a JSOC query and submit it to JSOC for processing.
Takes a variable number of `~sunpy.net.jsoc.attrs` as parameters,
which are chained together using the AND (`&`) operator.
Complex queries can be easily formed using logical operators such as
`&` and `|`, in the same way as the VSO client.
Parameters
----------
query : a variable number of `~sunpy.net.jsoc.attrs`
as parameters, which are chained together using
the ``AND`` (``&``) operator.
Returns
-------
response : `~sunpy.net.jsoc.jsoc.JSOCResponse` object
A collection of records that the query returns.
Examples
--------
*Example 1*
Request all AIA 304 image data between 2014-01-01T00:00 and
2014-01-01T01:00::
>>> import astropy.units as u
>>> from sunpy.net import jsoc
>>> from sunpy.net import attrs as a
>>> client = jsoc.JSOCClient() # doctest: +REMOTE_DATA
>>> response = client.search(a.Time('2017-09-06T12:00:00', '2017-09-06T12:02:00'),
... a.jsoc.Series('aia.lev1_euv_12s'), a.jsoc.Wavelength(304*u.AA),
... a.jsoc.Segment('image')) # doctest: +REMOTE_DATA
>>> print(response) # doctest: +REMOTE_DATA
T_REC TELESCOP INSTRUME WAVELNTH CAR_ROT
-------------------- -------- -------- -------- -------
2017-09-06T11:59:59Z SDO/AIA AIA_4 304 2194
2017-09-06T12:00:11Z SDO/AIA AIA_4 304 2194
2017-09-06T12:00:23Z SDO/AIA AIA_4 304 2194
2017-09-06T12:00:35Z SDO/AIA AIA_4 304 2194
2017-09-06T12:00:47Z SDO/AIA AIA_4 304 2194
2017-09-06T12:00:59Z SDO/AIA AIA_4 304 2194
2017-09-06T12:01:11Z SDO/AIA AIA_4 304 2194
2017-09-06T12:01:23Z SDO/AIA AIA_4 304 2194
2017-09-06T12:01:35Z SDO/AIA AIA_4 304 2194
2017-09-06T12:01:47Z SDO/AIA AIA_4 304 2194
2017-09-06T12:01:59Z SDO/AIA AIA_4 304 2194
*Example 2*
Request keyword data of ``hmi.v_45s`` for certain specific keywords only::
>>> import astropy.units as u
>>> from sunpy.net import jsoc
>>> from sunpy.net import attrs as a
>>> client = jsoc.JSOCClient() # doctest: +REMOTE_DATA
>>> response = client.search(a.Time('2014-01-01T00:00:00', '2014-01-01T00:10:00'),
... a.jsoc.Series('hmi.v_45s'),
... a.jsoc.Keys('T_REC, DATAMEAN, OBS_VR')) # doctest: +REMOTE_DATA
>>> print(response) # doctest: +REMOTE_DATA
T_REC DATAMEAN OBS_VR
----------------------- ------------------ ------------------
2014.01.01_00:00:45_TAI 1906.518188 1911.202614
2014.01.01_00:01:30_TAI 1908.876221 1913.945512
2014.01.01_00:02:15_TAI 1911.7771 1916.6679989999998
2014.01.01_00:03:00_TAI 1913.422485 1919.3699239999999
2014.01.01_00:03:45_TAI 1916.500488 1922.050862
2014.01.01_00:04:30_TAI 1920.414795 1924.7110050000001
2014.01.01_00:05:15_TAI 1922.636963 1927.35015
2014.01.01_00:06:00_TAI 1924.6973879999998 1929.968523
2014.01.01_00:06:45_TAI 1927.758301 1932.5664510000001
2014.01.01_00:07:30_TAI 1929.646118 1935.14288
2014.01.01_00:08:15_TAI 1932.097046 1937.698521
2014.01.01_00:09:00_TAI 1935.7286379999998 1940.23353
2014.01.01_00:09:45_TAI 1937.754028 1942.747605
2014.01.01_00:10:30_TAI 1940.1462399999998 1945.241147
*Example 3*
Request data of ``aia.lev1_euv_12s`` on the basis of PrimeKeys other than ``T_REC``::
>>> import astropy.units as u
>>> from sunpy.net import jsoc
>>> from sunpy.net import attrs as a
>>> client = jsoc.JSOCClient() # doctest: +REMOTE_DATA
>>> response = client.search(a.Time('2014-01-01T00:00:00', '2014-01-01T00:01:00'),
... a.jsoc.Series('aia.lev1_euv_12s'),
... a.jsoc.PrimeKey('WAVELNTH','171')) # doctest: +REMOTE_DATA
>>> print(response) # doctest: +REMOTE_DATA
T_REC TELESCOP INSTRUME WAVELNTH CAR_ROT
-------------------- -------- -------- -------- -------
2014-01-01T00:00:01Z SDO/AIA AIA_3 171 2145
2014-01-01T00:00:13Z SDO/AIA AIA_3 171 2145
2014-01-01T00:00:25Z SDO/AIA AIA_3 171 2145
2014-01-01T00:00:37Z SDO/AIA AIA_3 171 2145
2014-01-01T00:00:49Z SDO/AIA AIA_3 171 2145
2014-01-01T00:01:01Z SDO/AIA AIA_3 171 2145
"""
return_results = JSOCResponse()
query = and_(*query)
blocks = []
for block in walker.create(query):
iargs = kwargs.copy()
iargs.update(block)
blocks.append(iargs)
return_results.append(self._lookup_records(iargs))
return_results.query_args = blocks
return return_results
def search_metadata(self, *query, **kwargs):
"""
Get the metadata of all the files obtained in a search query.
Builds a jsoc query, similar to query method, and takes similar inputs.
Complex queries can be easily formed using logical operators such as
``&`` and ``|``, in the same way as the query function.
Parameters
----------
query : a variable number of `~sunpy.net.jsoc.attrs`
as parameters, which are chained together using
the ``AND`` (``&``) operator.
Returns
-------
res : `~pandas.DataFrame` object
A collection of metadata of all the files.
Example
-------
Request metadata of all AIA 304 image data between 2014-01-01T00:00 and
2014-01-01T01:00.
Since the function only performs a lookdata query and does not make a proper export
request, attributes like Segment need not be passed::
>>> import astropy.units as u
>>> from sunpy.net import jsoc
>>> from sunpy.net import attrs as a
>>> client = jsoc.JSOCClient() # doctest: +REMOTE_DATA
>>> metadata = client.search_metadata(
... a.Time('2014-01-01T00:00:00', '2014-01-01T00:01:00'),
... a.jsoc.Series('aia.lev1_euv_12s'), a.jsoc.Wavelength(304*u.AA)) # doctest: +REMOTE_DATA
>>> print(metadata[['T_OBS', 'WAVELNTH']]) # doctest: +REMOTE_DATA
T_OBS WAVELNTH
aia.lev1_euv_12s[2014-01-01T00:00:01Z][304] 2014-01-01T00:00:08.57Z 304
aia.lev1_euv_12s[2014-01-01T00:00:13Z][304] 2014-01-01T00:00:20.58Z 304
aia.lev1_euv_12s[2014-01-01T00:00:25Z][304] 2014-01-01T00:00:32.57Z 304
aia.lev1_euv_12s[2014-01-01T00:00:37Z][304] 2014-01-01T00:00:44.58Z 304
aia.lev1_euv_12s[2014-01-01T00:00:49Z][304] 2014-01-01T00:00:56.57Z 304
aia.lev1_euv_12s[2014-01-01T00:01:01Z][304] 2014-01-01T00:01:08.59Z 304
"""
query = and_(*query)
blocks = []
res = pd.DataFrame()
for block in walker.create(query):
iargs = kwargs.copy()
iargs.update(block)
iargs.update({'meta': True})
blocks.append(iargs)
res = res.append(self._lookup_records(iargs))
return res
def request_data(self, jsoc_response, **kwargs):
"""
Request that JSOC stages the data for download. This method will not
wait for the request to be staged.
Parameters
----------
jsoc_response : `~sunpy.net.jsoc.jsoc.JSOCResponse` object
The results of a query
Returns
-------
requests : `~drms.ExportRequest` object or a list of `~drms.ExportRequest` objects
    The request ID can be accessed by ``requests.id`` and the request
    status by ``requests.status``.
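Example
-------
A hypothetical usage sketch (not a doctest; it assumes ``response`` came from a
prior ``client.search(...)`` call that included ``a.jsoc.Notify`` with an email
address registered with JSOC)::

    requests = client.request_data(response)
    print(requests.id, requests.status)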
"""
requests = []
self.query_args = jsoc_response.query_args
for block in jsoc_response.query_args:
ds = self._make_recordset(**block)
cd = drms.Client(email=block.get('notify', ''))
protocol = block.get('protocol', 'fits')
if protocol != 'fits' and protocol != 'as-is':
error_message = "Protocols other than fits and as-is are "\
"are not supported."
raise TypeError(error_message)
method = 'url' if protocol == 'fits' else 'url_quick'
r = cd.export(ds, method=method, protocol=protocol)
requests.append(r)
if len(requests) == 1:
return requests[0]
return requests
def fetch(self, jsoc_response, path=None, progress=True, overwrite=False,
downloader=None, wait=True, sleep=10):
"""
Make the request for the data in a JSOC response and wait for it to be
staged and then download the data.
Parameters
----------
jsoc_response : `~sunpy.net.jsoc.jsoc.JSOCResponse` object
A response object
path : `str`
Path to save data to, defaults to SunPy download dir
progress : `bool`, optional
If `True` show a progress bar showing how many of the total files
have been downloaded. If `False`, no progress bar will be shown.
overwrite : `bool` or `str`, optional
Determine how to handle downloading if a file already exists with the
same name. If `False` the file download will be skipped and the path
returned to the existing file, if `True` the file will be downloaded
and the existing file will be overwritten, if `'unique'` the filename
will be modified to be unique.
downloader : `parfive.Downloader`, optional
The download manager to use.
wait : `bool`, optional
If `False` ``downloader.download()`` will not be called. Only has
any effect if `downloader` is not `None`.
sleep : `int`
The number of seconds to wait between calls to JSOC to check the status
of the request.
Returns
-------
results : a `~sunpy.net.download.Results` instance
A Results object
"""
# Make staging request to JSOC
responses = self.request_data(jsoc_response)
# Make response iterable
if not isiterable(responses):
responses = [responses]
# Add them to the response for good measure
jsoc_response.requests = [r for r in responses]
time.sleep(sleep/2.)
for response in responses:
response.wait(verbose=progress)
return self.get_request(responses, path=path, overwrite=overwrite,
progress=progress, downloader=downloader,
wait=wait)
def get_request(self, requests, path=None, overwrite=False, progress=True,
downloader=None, wait=True):
"""
Query JSOC to see if the request(s) is ready for download.
If the request is ready for download, it will then download it.
Parameters
----------
requests : `~drms.ExportRequest`, `str`, `list`
`~drms.ExportRequest` objects or `str` request IDs or lists
returned by `~sunpy.net.jsoc.jsoc.JSOCClient.request_data`.
path : `str`
Path to save data to, defaults to SunPy download dir.
progress : `bool`, optional
If `True` show a progress bar showing how many of the total files
have been downloaded. If `False`, no progress bar will be shown.
overwrite : `bool` or `str`, optional
Determine how to handle downloading if a file already exists with the
same name. If `False` the file download will be skipped and the path
returned to the existing file, if `True` the file will be downloaded
and the existing file will be overwritten, if `'unique'` the filename
will be modified to be unique.
downloader : `parfive.Downloader`, optional
The download manager to use.
wait : `bool`, optional
If `False` ``downloader.download()`` will not be called. Only has
any effect if `downloader` is not `None`.
Returns
-------
res : `~sunpy.net.download.Results`
    A `~sunpy.net.download.Results` instance or `None` if there are no URLs to download.
"""
c = drms.Client()
# Convert Responses to a list if not already
if isinstance(requests, str) or not isiterable(requests):
requests = [requests]
# Ensure all the requests are drms ExportRequest objects
for i, request in enumerate(requests):
if isinstance(request, str):
r = c.export_from_id(request)
requests[i] = r
# We only download if all are finished
if not all([r.has_succeeded() for r in requests]):
raise NotExportedError("Can not download as not all the requests "
"have been exported for download yet.")
# Ensure path has a {file} in it
if path is None:
default_dir = config.get("downloads", "download_dir")
path = os.path.join(default_dir, '{file}')
elif isinstance(path, str) and '{file}' not in path:
path = os.path.join(path, '{file}')
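# e.g. (hypothetical) path='/data' becomes '/data/{file}', and '{file}' is later
# filled in per file via fname.format(file=filename) below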
paths = []
for request in requests:
for filename in request.data['filename']:
# Ensure we don't duplicate the file extension
ext = os.path.splitext(filename)[1]
if ext and path.endswith(ext):
fname = path[:-len(ext)]
else:
fname = path
fname = fname.format(file=filename)
fname = os.path.expanduser(fname)
paths.append(fname)
dl_set = True
if not downloader:
dl_set = False
downloader = Downloader(progress=progress, overwrite=overwrite)
urls = []
for request in requests:
if request.status == 0:
for index, data in request.data.iterrows():
url_dir = request.request_url + '/'
urls.append(urllib.parse.urljoin(url_dir, data['filename']))
if urls:
if progress:
print_message = "{0} URLs found for download. Full request totalling {1}MB"
print(print_message.format(len(urls), request._d['size']))
for aurl, fname in zip(urls, paths):
downloader.enqueue_file(aurl, filename=fname)
if dl_set and not wait:
return Results()
results = downloader.download()
return results
def _make_recordset(self, series, start_time='', end_time='', wavelength='',
segment='', primekey={}, **kwargs):
"""
Take the query arguments and build a record string.
All the primekeys are now stored in the primekey dict, including Time and
Wavelength, which were passed through pre-defined attributes. The code below
extracts the passed prime-keys and arranges them in the order in which they
appear in the JSOC database.
`pkeys_isTime` is a pandas object whose index values are the prime-key names
and whose values are booleans identifying whether each prime-key is a
time-type prime-key or not. Since time-type prime-keys exist under different
names, we make them uniform by storing the time-type primekey under the
single name `TIME`.
Considering an example, if the primekeys that exist for a given series are
['HARPNUM', 'T_OBS', 'WAVELNTH'], we will consider three different cases of the
passed primekeys.
pkeys_isTime.index.values = ['HARPNUM', 'T_OBS', 'WAVELNTH']
Case 1
------
primekey = {'T_OBS'   : '2014.01.01_00:00:45_TAI',
            'HARPNUM' : '4864',
            'WAVELNTH': '605'}
If the primekey dict is as above, then pkstr should be:
pkstr = '[4864][2014.01.01_00:00:45_TAI][605]'
Case 2
------
primekey = {'T_OBS'   : '2014.01.01_00:00:45_TAI',
            'WAVELNTH': '605'}
If the primekey dict is as above, then pkstr should be:
pkstr = '[][2014.01.01_00:00:45_TAI][605]'
Case 3
------
primekey = {'T_OBS'   : '2014.01.01_00:00:45_TAI'}
If the primekey dict is as above, then pkstr should be:
pkstr = '[][2014.01.01_00:00:45_TAI]'
The idea behind this should be clear. We build up the `pkstr` string
containing the values of the passed prime-keys in the same order as they
occur in the list `pkeys_isTime.index.values`, i.e. how they are stored
in the online database. Any missing prime-key that occurs before a passed
prime-key is compensated for by an empty `[]`. Any empty brackets at the
end of the pkstr can be skipped.
"""
# Extract and format segment
# Convert list of segments into a comma-separated string
if segment:
if isinstance(segment, list):
segment = str(segment)[1:-1].replace(' ', '').replace("'", '')
segment = '{{{segment}}}'.format(segment=segment)
# Extract and format sample
sample = kwargs.get('sample', '')
if sample:
sample = '@{}s'.format(sample)
# Populate primekeys dict with Time and Wavelength values
if start_time and end_time:
# Check whether any primekey listed in PKEY_LIST_TIME has been passed through
# PrimeKey() attribute. If yes, raise an error, since Time can only be passed
# either through PrimeKey() attribute or Time() attribute.
if not any(x in PKEY_LIST_TIME for x in primekey):
timestr = '{start}-{end}{sample}'.format(
start=start_time.tai.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
end=end_time.tai.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
sample=sample)
else:
error_message = "Time attribute has been passed both as a Time()"\
" and PrimeKey(). Please provide any one of them"\
" or separate them by OR operator."
raise ValueError(error_message)
else:
# This is executed when Time has not been passed through Time() attribute.
# `match` stores all the time-type prime-keys that have been passed through
# PrimeKey() attribute. The length of `match` won't ever be greater than 1,
# but it is a good idea to keep a check.
match = set(primekey.keys()) & PKEY_LIST_TIME
if len(match) > 1:
error_message = "Querying of series, having more than 1 Time-type "\
"prime-keys is not yet supported. Alternative is to "\
"use only one of the primekey to query for series data."
raise ValueError(error_message)
if match:
timestr = '{0}'.format(primekey.pop(list(match)[0], ''))
else:
timestr = ''
if wavelength:
if not primekey.get('WAVELNTH', ''):
if isinstance(wavelength, list):
wavelength = [int(np.ceil(wave.to(u.AA).value)) for wave in wavelength]
wavelength = str(wavelength)
else:
wavelength = '{0}'.format(int(np.ceil(wavelength.to(u.AA).value)))
else:
# This is executed when wavelength has been passed both through PrimeKey()
# and Wavelength().
error_message = "Wavelength attribute has been passed both as a Wavelength()"\
" and PrimeKey(). Please provide any one of them"\
" or separate them by OR operator."
raise ValueError(error_message)
else:
# This is executed when wavelength has been passed through PrimeKey().
wavelength = '{0}'.format(primekey.pop('WAVELNTH', ''))
# Populate the primekey dict with the formatted Time and Wavelength.
if timestr:
primekey['TIME'] = timestr
if wavelength:
primekey['WAVELNTH'] = wavelength
# Extract and format primekeys
pkstr = ''
c = drms.Client()
si = c.info(series)
pkeys_isTime = si.keywords.loc[si.primekeys].is_time
for pkey in pkeys_isTime.index.values:
# The loop is iterating over the list of prime-keys existing for the given series.
if len(primekey) > 0:
if pkeys_isTime[pkey]:
pkstr += '[{0}]'.format(primekey.pop('TIME', ''))
else:
pkstr += '[{0}]'.format(primekey.pop(pkey, ''))
else:
break
# break because we can skip adding {} at the end of pkstr, if the primekey
# dict is empty.
if not pkstr:
# pkstr cannot be totally empty
error_message = "Atleast one PrimeKey must be passed."
raise ValueError(error_message)
dataset = '{series}{primekey}{segment}'.format(series=series,
primekey=pkstr,
segment=segment)
return dataset
def _lookup_records(self, iargs):
"""
Do a LookData request to JSOC to work out what results the query returns.
"""
keywords_default = ['T_REC', 'TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAR_ROT']
isMeta = iargs.get('meta', False)
c = drms.Client()
if isMeta:
keywords = '**ALL**'
else:
keywords = iargs.get('keys', keywords_default)
if 'series' not in iargs:
error_message = "Series must be specified for a JSOC Query"
raise ValueError(error_message)
if not isinstance(keywords, list) and not isinstance(keywords, str):
error_message = "Keywords can only be passed as a list or "\
"comma-separated strings."
raise TypeError(error_message)
# Raise errors for PrimeKeys
# Get a set of the PrimeKeys that exist for the given series, and check
# whether the passed PrimeKeys is a subset of that.
pkeys = c.pkeys(iargs['series'])
pkeys_passed = iargs.get('primekey', None)  # pkeys_passed is a dict of key-value pairs.
if pkeys_passed is not None:
if not set(list(pkeys_passed.keys())) <= set(pkeys):
error_message = "Unexpected PrimeKeys were passed. The series {series} "\
"supports the following PrimeKeys {pkeys}"
raise ValueError(error_message.format(series=iargs['series'], pkeys=pkeys))
# Raise errors for wavelength
wavelength = iargs.get('wavelength', '')
if wavelength:
if 'WAVELNTH' not in pkeys:
error_message = "The series {series} does not support wavelength attribute."\
"The following primekeys are supported {pkeys}"
raise TypeError(error_message.format(series=iargs['series'], pkeys=pkeys))
# Raise errors for segments
# Get a set of the segments that exist for the given series, and check
# whether the passed segments is a subset of that.
si = c.info(iargs['series'])
segs = list(si.segments.index.values) # Fetches all valid segment names
segs_passed = iargs.get('segment', None)
if segs_passed is not None:
if not isinstance(segs_passed, list) and not isinstance(segs_passed, str):
error_message = "Segments can only be passed as a comma-separated"\
" string or a list of strings."
raise TypeError(error_message)
elif isinstance(segs_passed, str):
segs_passed = segs_passed.replace(' ', '').split(',')
if not set(segs_passed) <= set(segs):
error_message = "Unexpected Segments were passed. The series {series} "\
"contains the following Segments {segs}"
raise ValueError(error_message.format(series=iargs['series'], segs=segs))
iargs['segment'] = segs_passed
# Build the JSOC record-set string from the query arguments (time handling,
# including TAI conversion for Time() inputs, is done inside _make_recordset).
ds = self._make_recordset(**iargs)
# Convert the list of keywords into comma-separated string.
if isinstance(keywords, list):
key = str(keywords)[1:-1].replace(' ', '').replace("'", '')
else:
key = keywords
r = c.query(ds, key=key, rec_index=isMeta)
# If the method was called from search_metadata(), return a pandas DataFrame,
# otherwise return an astropy Table.
if isMeta:
return r
if r is None or r.empty:
return astropy.table.Table()
else:
return astropy.table.Table.from_pandas(r)
@classmethod
def _can_handle_query(cls, *query):
chkattr = ['Series', 'Protocol', 'Notify', 'Wavelength', 'Time',
'Segment', 'Keys', 'PrimeKey', 'Sample']
return all([x.__class__.__name__ in chkattr for x in query])
|
bsd-2-clause
|
fpeder/mscr
|
mscr/util.py
|
1
|
6369
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import cv2
import os
import pylab as plt
import numpy as np
from glob import glob
from progressbar import ProgressBar, Percentage, Bar
from sklearn.neighbors import KNeighborsClassifier
def rgb2gray(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def gray2rgb(img):
return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
def gray2bgr(img):
return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
def rgb2bgr(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
def load_gray(infile):
if not os.path.exists(infile):
sys.exit('err: img file %s doesn\'t exist' % infile)
img = cv2.imread(infile, 0)
return img
def get_dim(img):
return img.shape[:2]
def imshow(img):
plt.imshow(img)
plt.show()
class MyCC():
def __init__(self, labels, min_elem=10000, remove_small=True):
self._labs = labels
self._me = min_elem
self._rs = remove_small
self._cc = []
def run(self, img):
H, W = img.shape[:2]
out = []
for lab in self._labs:
tmp = np.zeros((H, W), np.int32)
tmp[img == lab] = -1
ccount = 1
while True:
pt = self.__find_seed(tmp)
if not pt:
break
cv2.floodFill(tmp, None, pt, ccount)
ccount += 1
if self._rs:
tmp = self.__remove_small(tmp)
out.append(tmp.copy())
self._cc = out
return out
def get_bounding_box(self):
allbb = []
for comp in self._cc:
bb = []
for lab in np.unique(comp)[1:]:
x, y = np.where(comp == lab)
tmp = [(y.min(), x.min()), (y.max(), x.max())]
bb.append(tmp)
allbb.append(bb)
return allbb
def __remove_small(self, img):
for lab in np.unique(img)[1:]:
cc = img == lab
if cc.sum() < self._me:
img[cc] = 0
return img
def __find_seed(self, img, l=-1):
pt = ()
tmp = np.where(img == l)
if len(tmp[0]) > 0 and len(tmp[1]) > 0:
pt = (tmp[1][0], tmp[0][0])
return pt
class MyKNN():
def __init__(self, cls, nn=25, skip=4):
self._cls = cls
self._skip = skip
self._clf = KNeighborsClassifier(n_neighbors=nn)
def fit(self, img):
X, y = self.__get_data(img)
self._clf.fit(X, y)
def predict(self, X):
p = self._clf.predict(X)
return p
def __get_data(self, img):
X, y = [], []
for k, v in self._cls.iteritems():
tmp = np.where(img == v)
tx = np.vstack((tmp[0], tmp[1])).T
tx = tx[::self._skip, :]
ty = v * np.ones(tx.shape[0], np.uint8)
X = np.vstack((X, tx)) if len(X) > 0 else tx
y = np.hstack((y, ty)) if len(y) > 0 else ty
return X, y
@property
def labels(self):
return self._cls.values()
class MyMorph():
def __init__(self, size, niter, elem=cv2.MORPH_RECT):
self._size = size
self._niter = niter
self._elem = elem
def run(self, img):
strel = cv2.getStructuringElement(self._elem, self._size)
img = cv2.morphologyEx(img.astype(np.float32), cv2.MORPH_CLOSE,
strel, None, None, self._niter)
return img
class AddSuffix():
def __init__(self, sub, ext, sep='.'):
self._sub = sub
self._ext = ext
self._sep = sep
def run(self, path):
folder, name = os.path.split(path)
tmp = self._sep.join(name.split(self._sep)[:-1])
name = tmp + self._sep + self._sub + self._sep + self._ext
out = os.path.join(folder, name) if folder else name
return out
class ImgFileIter(object):
def __init__(self, src, msg='', ext='.jpg', crop=False):
self._src = src
self._msg = msg
self._ext = ext
self._crop = crop
def run(self):
self.__chek_path([self._src])
filz = glob(os.path.join(self._src, '*' + self._ext))
pbar = MyProgressBar(len(filz), self._msg)
for x in filz:
img = cv2.imread(x)
if self._crop:
img = self._crop.run(img)
pbar.update()
yield img
pbar.finish()
def __chek_path(self, path):
for x in path:
if not os.path.exists(x):
sys.exit('err: %s doesn\'t exist' % x)
class MyProgressBar(object):
def __init__(self, num, msg=''):
self._count = 0
self._pb = ProgressBar(
widgets=[msg, ':', Percentage(), ' ', Bar()],
maxval=num).start()
def update(self):
self._pb.update(self._count)
self._count += 1
def finish(self):
self._pb.finish()
class Crop(object):
def __init__(self, ksize=5,
crit=cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU):
self._ksize = ksize
self._box = []
self._thr = MyThreshold(ksize=ksize, crit=crit)
def run(self, img):
tmp = rgb2gray(img) if len(img.shape) == 3 else img
tmp = self._thr.run(tmp)
y, x = np.where(tmp == 255)
box = ((y.min(), x.min()), (y.max(), x.max()))
img = img[box[0][0]:box[1][0], box[0][1]:box[1][1]]
self._box = box
return img
@property
def box(self):
return self._box
class MyThreshold():
def __init__(self, val=255, ksize=5,
crit=cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU):
self._val = val
self._ksize = ksize
self._crit = crit
def run(self, img):
_, tmp = cv2.threshold(img, 0, self._val, self._crit)
if self._ksize:
tmp = cv2.medianBlur(tmp, self._ksize)
return tmp
class StdResize(object):
def __init__(self, h, method=cv2.INTER_LINEAR):
self._h = h
self._method = method
def run(self, img):
r = float(img.shape[1])/img.shape[0]
img = cv2.resize(img, (int(r * self._h), self._h),
interpolation=self._method)
return img
if __name__ == '__main__':
img = load_gray('test/test4.jpg')
c = Crop()
asd = c.run(img)
imshow(asd)
|
bsd-2-clause
|
yunque/librosa
|
tests/test_onset.py
|
1
|
7465
|
#!/usr/bin/env python
# CREATED:2013-03-11 18:14:30 by Brian McFee <[email protected]>
# unit tests for librosa.beat
from __future__ import print_function
from nose.tools import raises, eq_
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except:
pass
import matplotlib
matplotlib.use('Agg')
import warnings
import numpy as np
import librosa
__EXAMPLE_FILE = 'data/test1_22050.wav'
def test_onset_strength_audio():
def __test(y, sr, feature, n_fft, hop_length, lag, max_size, detrend, center, aggregate):
oenv = librosa.onset.onset_strength(y=y, sr=sr,
S=None,
detrend=detrend,
center=center,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length,
lag=lag,
max_size=max_size)
assert oenv.ndim == 1
S = librosa.feature.melspectrogram(y=y,
n_fft=n_fft,
hop_length=hop_length)
target_shape = S.shape[-1]
#if center:
# target_shape += n_fft // (2 * hop_length)
if not detrend:
assert np.all(oenv >= 0)
eq_(oenv.shape[-1], target_shape)
y, sr = librosa.load(__EXAMPLE_FILE)
for feature in [None,
librosa.feature.melspectrogram,
librosa.feature.chroma_stft]:
for n_fft in [512, 2048]:
for hop_length in [n_fft // 2, n_fft // 4]:
for lag in [0, 1, 2]:
for max_size in [0, 1, 2]:
for detrend in [False, True]:
for center in [False, True]:
for aggregate in [None, np.mean, np.max]:
if lag < 1 or max_size < 1:
tf = raises(librosa.ParameterError)(__test)
else:
tf = __test
yield (tf, y, sr, feature, n_fft,
hop_length, lag, max_size, detrend, center, aggregate)
tf = raises(librosa.ParameterError)(__test)
yield (tf, None, sr, feature, n_fft,
hop_length, lag, max_size, detrend, center, aggregate)
def test_onset_strength_spectrogram():
def __test(S, sr, feature, n_fft, hop_length, detrend, center, aggregate):
oenv = librosa.onset.onset_strength(y=None, sr=sr,
S=S,
detrend=detrend,
center=center,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length)
assert oenv.ndim == 1
target_shape = S.shape[-1]
#if center:
# target_shape += n_fft // (2 * hop_length)
if not detrend:
assert np.all(oenv >= 0)
eq_(oenv.shape[-1], target_shape)
y, sr = librosa.load(__EXAMPLE_FILE)
S = librosa.feature.melspectrogram(y=y, sr=sr)
for feature in [None,
librosa.feature.melspectrogram,
librosa.feature.chroma_stft]:
for n_fft in [512, 2048]:
for hop_length in [n_fft // 2, n_fft // 4]:
for detrend in [False, True]:
for center in [False, True]:
for aggregate in [None, np.mean, np.max]:
yield (__test, S, sr, feature, n_fft,
hop_length, detrend, center, aggregate)
tf = raises(librosa.ParameterError)(__test)
yield (tf, None, sr, feature, n_fft,
hop_length, detrend, center, aggregate)
def test_onset_strength_multi():
y, sr = librosa.load(__EXAMPLE_FILE)
S = librosa.feature.melspectrogram(y=y, sr=sr)
channels = np.linspace(0, S.shape[0], num=5).astype(int)
for lag in [1, 2, 3]:
for max_size in [1]:
# We only test with max_size=1 here to make the sub-band slicing test simple
odf_multi = librosa.onset.onset_strength_multi(S=S,
lag=lag, max_size=1,
channels=channels)
eq_(len(odf_multi), len(channels) - 1)
for i, (s, t) in enumerate(zip(channels, channels[1:])):
odf_single = librosa.onset.onset_strength(S=S[s:t],
lag=lag,
max_size=1)
assert np.allclose(odf_single, odf_multi[i])
def test_onset_detect_real():
def __test(y, sr, oenv, hop_length):
onsets = librosa.onset.onset_detect(y=y, sr=sr, onset_envelope=oenv,
hop_length=hop_length)
assert np.all(onsets > 0)
assert np.all(onsets < len(y) * sr // hop_length)
if oenv is not None:
assert np.all(onsets < len(oenv))
y, sr = librosa.load(__EXAMPLE_FILE)
# Test with no signal
yield raises(librosa.ParameterError)(__test), None, sr, None, 512
for hop_length in [64, 512, 2048]:
yield __test, y, sr, None, hop_length
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
yield __test, y, sr, oenv, hop_length
def test_onset_detect_const():
def __test(y, sr, oenv, hop_length):
onsets = librosa.onset.onset_detect(y=y, sr=sr, onset_envelope=oenv,
hop_length=hop_length)
eq_(len(onsets), 0)
sr = 22050
duration = 3.0
for f in [np.zeros, np.ones]:
y = f(int(duration * sr))
for hop_length in [64, 512, 2048]:
yield __test, y, sr, None, hop_length
yield __test, -y, sr, None, hop_length
oenv = librosa.onset.onset_strength(y=y,
sr=sr,
hop_length=hop_length)
yield __test, y, sr, oenv, hop_length
def test_onset_strength_deprecated():
y, sr = librosa.load(__EXAMPLE_FILE)
def __test(centering):
no_warning = (centering is None)
warnings.resetwarnings()
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as out:
librosa.onset.onset_strength(y=y, sr=sr, centering=centering)
if no_warning:
eq_(out, [])
else:
assert len(out) > 0
assert out[0].category is DeprecationWarning
assert 'deprecated' in str(out[0].message).lower()
for centering in [True, False, None]:
yield __test, centering
|
isc
|
MatthieuBizien/scikit-learn
|
examples/neural_networks/plot_mnist_filters.py
|
57
|
2195
|
"""
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
0x0all/kaggle-galaxies
|
predict_augmented_npy_maxout2048_extradense_dup3.py
|
7
|
9593
|
"""
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
|
bsd-3-clause
|
crwilcox/PTVS
|
Python/Product/ML/ProjectTemplates/ClusteringTemplate/clustering.py
|
18
|
10394
|
'''
This script performs the basic process for applying a machine learning
algorithm to a dataset using Python libraries.
The four steps are:
1. Download a dataset (using pandas)
2. Process the numeric data (using numpy)
3. Train and evaluate learners (using scikit-learn)
4. Plot and compare results (using matplotlib)
The data is downloaded from URL, which is defined below. As is normal
for machine learning problems, the nature of the source data affects
the entire solution. When you change URL to refer to your own data, you
will need to review the data processing steps to ensure they remain
correct.
============
Example Data
============
The example is from http://archive.ics.uci.edu/ml/datasets/Water+Treatment+Plant
It contains a range of continuous values from sensors at a water
treatment plant, and the aim is to use unsupervised learners to
determine whether the plant is operating correctly. See the linked page
for more information about the data set.
This script uses unsupervised clustering learners and dimensionality
reduction models to find similar values, outliers, and visualize the
classes.
'''
# Remember to update the script for the new data when you change this URL
URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/water-treatment/water-treatment.data"
# Uncomment this call when using matplotlib to generate images
# rather than displaying interactive UI.
#import matplotlib
#matplotlib.use('Agg')
from pandas import read_table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
try:
# [OPTIONAL] Seaborn makes plots nicer
import seaborn
except ImportError:
pass
# =====================================================================
def download_data():
'''
Downloads the data for this script into a pandas DataFrame.
'''
# If your data is in an Excel file, install 'xlrd' and use
# pandas.read_excel instead of read_table
#from pandas import read_excel
#frame = read_excel(URL)
# If your data is in a private Azure blob, install 'azure' and use
# BlobService.get_blob_to_path() with read_table() or read_excel()
#import azure.storage
#service = azure.storage.BlobService(ACCOUNT_NAME, ACCOUNT_KEY)
#service.get_blob_to_path(container_name, blob_name, 'my_data.csv')
#frame = read_table('my_data.csv', ...
frame = read_table(
URL,
# Uncomment if the file needs to be decompressed
#compression='gzip',
#compression='bz2',
# Specify the file encoding
# Latin-1 is common for data from US sources
encoding='latin-1',
#encoding='utf-8', # UTF-8 is also common
# Specify the separator in the data
sep=',', # comma separated values
#sep='\t', # tab separated values
#sep=' ', # space separated values
# Ignore spaces after the separator
skipinitialspace=True,
# Treat question marks as missing values
na_values=['?'],
# Generate row labels from each row number
index_col=None,
#index_col=0, # use the first column as row labels
#index_col=-1, # use the last column as row labels
# Generate column headers row from each column number
header=None,
#header=0, # use the first line as headers
# Use manual headers and skip the first row in the file
#header=0,
#names=['col1', 'col2', ...],
)
# Return a subset of the columns
#return frame[['col1', 'col4', ...]]
# Return the entire frame
#return frame
# Return all except the first column
del frame[frame.columns[0]]
return frame
# =====================================================================
def get_features(frame):
'''
Transforms and scales the input data and returns a numpy array that
is suitable for use with scikit-learn.
Note that in unsupervised learning there are no labels.
'''
# Replace missing values with 0.0
# or we can use scikit-learn to calculate missing values below
#frame[frame.isnull()] = 0.0
# Convert values to floats
arr = np.array(frame, dtype=np.float)
# Impute missing values from the mean of their entire column
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy='mean')
arr = imputer.fit_transform(arr)
# Normalize the entire data set to mean=0.0 and variance=1.0
from sklearn.preprocessing import scale
arr = scale(arr)
return arr
# =====================================================================
def reduce_dimensions(X):
'''
Reduce the dimensionality of X with different reducers.
Return a sequence of tuples containing:
(title, x coordinates, y coordinates)
for each reducer.
'''
# Principal Component Analysis (PCA) is a linear reduction model
# that identifies the components of the data with the largest
# variance.
from sklearn.decomposition import PCA
reducer = PCA(n_components=2)
X_r = reducer.fit_transform(X)
yield 'PCA', X_r[:, 0], X_r[:, 1]
# Independent Component Analysis (ICA) decomposes a signal by
# identifying the independent contributing sources.
from sklearn.decomposition import FastICA
reducer = FastICA(n_components=2)
X_r = reducer.fit_transform(X)
yield 'ICA', X_r[:, 0], X_r[:, 1]
# t-distributed Stochastic Neighbor Embedding (t-SNE) is a
# non-linear reduction model. It operates best on data with a low
# number of attributes (<50) and is often preceded by a linear
# reduction model such as PCA.
from sklearn.manifold import TSNE
reducer = TSNE(n_components=2)
X_r = reducer.fit_transform(X)
yield 't-SNE', X_r[:, 0], X_r[:, 1]
def evaluate_learners(X):
'''
Run multiple times with different learners to get an idea of the
relative performance of each configuration.
Returns a sequence of tuples containing:
(title, predicted classes)
for each learner.
'''
from sklearn.cluster import (MeanShift, MiniBatchKMeans,
SpectralClustering, AgglomerativeClustering)
learner = MeanShift(
# Let the learner use its own heuristic for determining the
# number of clusters to create
bandwidth=None
)
y = learner.fit_predict(X)
yield 'Mean Shift clusters', y
learner = MiniBatchKMeans(n_clusters=2)
y = learner.fit_predict(X)
yield 'K Means clusters', y
learner = SpectralClustering(n_clusters=2)
y = learner.fit_predict(X)
yield 'Spectral clusters', y
learner = AgglomerativeClustering(n_clusters=2)
y = learner.fit_predict(X)
yield 'Agglomerative clusters (N=2)', y
learner = AgglomerativeClustering(n_clusters=5)
y = learner.fit_predict(X)
yield 'Agglomerative clusters (N=5)', y
# =====================================================================
def plot(Xs, predictions):
'''
Create a plot comparing multiple learners.
`Xs` is a list of tuples containing:
(title, x coord, y coord)
`predictions` is a list of tuples containing
(title, predicted classes)
All the elements will be plotted against each other in a
two-dimensional grid.
'''
# We will use subplots to display the results in a grid
nrows = len(Xs)
ncols = len(predictions)
fig = plt.figure(figsize=(16, 8))
fig.canvas.set_window_title('Clustering data from ' + URL)
# Show each element in the plots returned from plt.subplots()
for row, (row_label, X_x, X_y) in enumerate(Xs):
for col, (col_label, y_pred) in enumerate(predictions):
ax = plt.subplot(nrows, ncols, row * ncols + col + 1)
if row == 0:
plt.title(col_label)
if col == 0:
plt.ylabel(row_label)
# Plot the decomposed input data and use the predicted
# cluster index as the value in a color map.
plt.scatter(X_x, X_y, c=y_pred.astype(np.float), cmap='prism', alpha=0.5)
# Set the axis tick formatter to reduce the number of ticks
ax.xaxis.set_major_locator(MaxNLocator(nbins=4))
ax.yaxis.set_major_locator(MaxNLocator(nbins=4))
# Let matplotlib handle the subplot layout
plt.tight_layout()
# ==================================
# Display the plot in interactive UI
plt.show()
# To save the plot to an image file, use savefig()
#plt.savefig('plot.png')
# Open the image file with the default image viewer
#import subprocess
#subprocess.Popen('plot.png', shell=True)
# To save the plot to an image in memory, use BytesIO and savefig()
# This can then be written to any stream-like object, such as a
# file or HTTP response.
#from io import BytesIO
#img_stream = BytesIO()
#plt.savefig(img_stream, fmt='png')
#img_bytes = img_stream.getvalue()
#print('Image is {} bytes - {!r}'.format(len(img_bytes), img_bytes[:8] + b'...'))
# Closing the figure allows matplotlib to release the memory used.
plt.close()
# =====================================================================
if __name__ == '__main__':
# Download the data set from URL
print("Downloading data from {}".format(URL))
frame = download_data()
# Process data into a feature array
# This is unsupervised learning, and so there are no labels
print("Processing {} samples with {} attributes".format(len(frame.index), len(frame.columns)))
X = get_features(frame)
# Run multiple dimensionality reduction algorithms on the data
print("Reducing dimensionality")
Xs = list(reduce_dimensions(X))
# Evaluate multiple clustering learners on the data
print("Evaluating clustering learners")
predictions = list(evaluate_learners(X))
# Display the results
print("Plotting the results")
plot(Xs, predictions)
|
apache-2.0
|
sergeimoiseev/othodi_code
|
old/frontend_mpl_basemap.py
|
2
|
4318
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
# matplotlib.use('nbagg')
# import matplotlib.pyplot as plt
# import matplotlib.cm as cm
# import mpld3
# matplotlib.use('nbagg')
def plot_route(coord_pairs,annotes):
# matplotlib.use('nbagg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
MIN_L_WIDTH=10
POINT_SIZE=2*MIN_L_WIDTH
fig = plt.figure("caption",figsize=(10,10))
ax = fig.add_subplot(111)
# colors_list = cm.rainbow(np.linspace(0,1,len(coord_pairs)))
ax.plot(*zip(*coord_pairs),ls='-',marker='o',ms=POINT_SIZE,lw=MIN_L_WIDTH,alpha=0.5,solid_capstyle='round',color='r')
for i, txt in enumerate(annotes):
ax.annotate(txt, (coord_pairs[i][0],coord_pairs[i][1]), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points')
# ax.annotate(txt, (coord_pairs[i][0],coord_pairs[i][1]), xytext=(1,1))
ax.set_xlim([0.9*min(zip(*coord_pairs)[0]),1.1*max(zip(*coord_pairs)[0])]) # must be after plot
ax.set_ylim([0.9*min(zip(*coord_pairs)[1]),1.1*max(zip(*coord_pairs)[1])])
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# mpld3.show() # bad rendering
plt.show()
# plot_route(coord_pairs,annotations)
# plot_route(list_of_coords_pairs,annotes4points)
from mpl_toolkits.basemap import Basemap
def plot_route_on_basemap(coord_pairs,annotes,added_points_param_list=None):
matplotlib.use('nbagg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# matplotlib.use('nbagg')
fig=plt.figure(figsize=(16,12))
ax=fig.add_axes([0.05,0.05,0.95,0.95])
lat_list, lng_list = zip(*coord_pairs)
# setup mercator map projection.
m = Basemap(llcrnrlon=min(lng_list)-2,llcrnrlat=min(lat_list)-2,urcrnrlon=max(lng_list)+2,urcrnrlat=max(lat_list)+2,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',projection='merc',\
lat_0=0.,lon_0=0.,lat_ts=0.)
MIN_L_WIDTH=7
POINT_SIZE=2*MIN_L_WIDTH
m.drawcoastlines()
m.fillcontinents()
x_all=[]
y_all=[]
for i,point in enumerate(coord_pairs):
lon = point[-1]
lat = point[0]
x,y = m(*[lon,lat])
x_all.append(x)
y_all.append(y)
if (i!=0 and i!=len(annotes)-1):
plt.annotate(annotes[i], xy=(x,y), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"))
plt.annotate(annotes[-1], xy=(x_all[-1],y_all[-1]), xytext=(POINT_SIZE/2,POINT_SIZE), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7)))
plt.annotate(annotes[0], xy=(x_all[0],y_all[0]), xytext=(POINT_SIZE/2,POINT_SIZE), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7)))
plt.plot(x_all,y_all,ls='-',marker='o',ms=POINT_SIZE,lw=MIN_L_WIDTH,alpha=0.5,solid_capstyle='round',color='r')
#----
# plt, m = add_points_to_basemap_plot(plt,m,[1,1])
#----
with open("x.txt",'w') as f:
pass
if added_points_param_list!=None:
added_points_coords = added_points_param_list[0]
names = added_points_param_list[1]
# x_added=[]
# y_added=[]
for i,point in enumerate(added_points_coords):
lat = point[0]
lon = point[-1]
x,y = m(*[lon,lat])
# x_added.append(x)
# y_added.append(y)
# if (i!=0 and i!=len(names)-1):
# plt.annotate(names[i], xy=(x,y), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.5, 0.7), ec="none"))
plt.annotate(names[i], xy=(x,y), xytext=(0,-POINT_SIZE*2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.5, 0.7)))
plt.plot(x,y,ls='-',marker='o',ms=POINT_SIZE,lw=MIN_L_WIDTH,alpha=0.5,solid_capstyle='round',color='pink')
with open("x.txt",'a') as f:
f.write("plotted %f,%f\n" % (x,y))
# draw parallels
m.drawparallels(np.arange(-20,0,20),labels=[1,1,0,1])
# draw meridians
m.drawmeridians(np.arange(-180,180,30),labels=[1,1,0,1])
# ax.set_title('Great Circle from New York to London')
# m.bluemarble()
plt.show()
# mpld3.show() # bad rendering
if __name__ == "__main__":
print('No test yet.')
|
mit
|
ProgramFan/bentoo
|
bentoo/tools/viewer.py
|
1
|
16796
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
'''
bentoo-viewer.py - view bentoo-calltree-analyser results graphically
This tool visualizes datasets generated by bentoo-calltree-analyser.
'''
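# Illustrative spec sketch (inferred from the drawing code below; the labels and
# column names are hypothetical):
#
# {
#   "label": "Example view",
#   "subgroups": [
#     {"label": "Call tree", "type": "tree",
#      "data": ["id", "level", "name"], "width": 300},
#     {"label": "Share", "type": "percent", "data": "time_percent"},
#     {"label": "Time", "type": "raw", "data": "time", "format": ".3f"}
#   ]
# }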
from builtins import range
from builtins import object
import os
import sqlite3
import argparse
import numpy
import pandas
import json
import re
import toyplot
import toyplot.color
import toyplot.svg
import toyplot.pdf
import toyplot.png
import toyplot.browser
#
# data reader
#
import collections
try:
import yaml
def dict_representer(dumper, data):
return dumper.represent_dict(iter(data.items()))
def dict_constructor(loader, node):
return collections.OrderedDict(loader.construct_pairs(node))
yaml.add_representer(collections.OrderedDict, dict_representer)
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
dict_constructor)
def loads(string, *args, **kwargs):
return yaml.load(string, *args, **kwargs)
except ImportError:
import json
def loads(string, *args, **kwargs):
return json.loads(
string, object_pairs_hook=collections.OrderedDict, *args, **kwargs)
#
# Auxiliary functions
#
def draw_line(axes, x0, y0, x1, y1, *args, **kwargs):
axes.plot([x0, x1], [y0, y1], *args, **kwargs)
def draw_points(axes, data, *args, **kwargs):
x = [c[0][0] for c in data]
y = [c[0][1] for c in data]
color = [c[1] for c in data]
axes.scatterplot(x, y, color=color, *args, **kwargs)
#
# Layout algorithm for tree representation
#
class UniqueId(object):
def __init__(self, start=0):
self.value = start
def id(self):
self.value += 1
return self.value - 1
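# A small worked example (hypothetical input) for build_tree below: given
# data = {"id": [0, 1, 2, 3], "level": [0, 1, 2, 1]}, flat_to_recursive turns
# the (level, id) pairs into
#   {"id": 0, "children": [{"id": 1, "children": [{"id": 2, "children": []}]},
#                          {"id": 3, "children": []}]}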
def build_tree(data):
def flat_to_recursive(nodes):
if not nodes:
return []
result = []
min_level = min(x[0] for x in nodes)
assert (nodes[0][0] == min_level)
nid, children = (nodes[0][1], [])
for l, n in nodes[1:]:
if l == min_level:
result.append({
"id": nid,
"children": flat_to_recursive(children)
})
nid, children = (n, [])
else:
children.append((l, n))
result.append({"id": nid, "children": flat_to_recursive(children)})
return result
nodes = [(data["level"][i], data["id"][i])
for i in range(len(data["id"]))]
trees = flat_to_recursive(nodes)
assert (len(trees) == 1)
return trees[0]
def compute_layout(data, colors):
tree = build_tree(data)
# id -> (level, name)
rows = {}
rowids = {}
uuid = UniqueId()
def compute_rows(tree, level=0):
if not tree:
return
node_id = uuid.id()
rows[node_id] = (level, tree["id"])
rowids[tree["id"]] = node_id
for c in tree["children"]:
compute_rows(c, level + 1)
compute_rows(tree)
lines = []
def compute_lines(tree, level=0):
if not tree:
return
children = []
for c in tree["children"]:
children.append(rowids[c["id"]])
if not children:
return
start_row = rowids[tree["id"]]
end_row = children[-1]
rows = children
color = colors[children[0]]
lines.append(((start_row, end_row, level), rows, color))
for c in tree["children"]:
compute_lines(c, level + 1)
compute_lines(tree)
return {"lines": lines, "rows": rows}
def draw_stem(axes, layout, indent=0.1):
row_max = len(layout["rows"]) - 1
# compute graph elements
lines = []
end_points = []
corner_points = []
for (start_row, end_row, level), rows, color in layout["lines"]:
# NOTE: the axes grows from bottom to top, so reverse it.
p0 = ((level + 0.5) * indent, row_max - start_row - 0.5)
p1 = ((level + 0.5) * indent, row_max - end_row)
end_points.append([p0, color])
corner_points.append([p1, color])
lines.append([p0[0], p0[1], p1[0], p1[1], color])
for row in rows:
p0 = ((level + 0.5) * indent, row_max - row)
p1 = ((level + 1) * indent, row_max - row)
lines.append([p0[0], p0[1], p1[0], p1[1], color])
end_points.append([p1, color])
# draw graph elements
for l in lines:
draw_line(
axes,
l[0],
l[1],
l[2],
l[3],
style={"stroke-width": 2},
color=l[4])
draw_points(axes, end_points, size=4, marker="o")
draw_points(axes, corner_points, size=1, marker="o")
def draw_nodes(axes, layout, indent=0.1):
rows = layout["rows"]
ids = sorted(rows.keys())
level = [rows[k][0] for k in ids]
# NOTE: the axes grows from bottom to top, so reverse it.
space = [x * indent for x in level][::-1]
block = [1 - x for x in space]
series = numpy.column_stack([space, block])
axes.x.domain.max = 1
axes.bars(series, along="y")
def compute_color_index(data):
uuid = UniqueId()
def assign_color(tree):
if not tree:
return
if not tree.get("children", []):
return
color = uuid.id()
for c in tree["children"]:
c["color"] = color
for c in tree["children"]:
assign_color(c)
tree = build_tree(data)
tree["color"] = uuid.id()
assign_color(tree)
colors = {}
def get_colors(tree):
if not tree:
return
colors[tree["id"]] = tree["color"]
for c in tree["children"]:
get_colors(c)
get_colors(tree)
return [colors[n] for n in data["id"]]
def compute_color(colormap, tree):
color_index = compute_color_index(tree)
return [colormap.color(i) for i in color_index]
def draw_tree(axes, data, colors, indent=0.05, theme="dark"):
assert theme in ("light", "dark")
data_1 = [x * indent for x in data["level"]][::-1]
data_2 = [1 - x for x in data_1]
series = numpy.column_stack([data_1, data_2])
data_2_color = colors[::-1]
data_1_color = []
for c in data_2_color:
color = toyplot.color.rgba(c["r"], c["g"], c["b"], 0.1)
data_1_color.append(color)
if theme != "dark":
data_2_color = data_1_color
axes.bars(
series, along="y", color=numpy.array([data_1_color, data_2_color]).T)
axes.x.domain.max = 1
text_color = "white" if theme == "dark" else "black"
for i, n in enumerate(data["name"][::-1]):
axes.text(
data_1[i],
i,
n,
color=text_color,
style={"text-anchor": "start",
"-toyplot-anchor-shift": "5px"})
layout = compute_layout(data, colors)
draw_stem(axes, layout, indent)
def draw_percent(axes, data, masks, colors, data_format=".1f"):
if masks is not None:
data = data.where(masks)
data = data[::-1]
axes.bars(data, along="y", color=colors[::-1])
axes.x.domain.min = 0
for i, n in enumerate(data):
format_spec = "{:%s}%%" % data_format
axes.text(
n,
i,
format_spec.format(n * 100),
color="black",
style={"text-anchor": "start",
"-toyplot-anchor-shift": "5px"})
def draw_bars(axes, data, colors, data_format=".1f"):
data = data[::-1]
axes.bars(data, along="y", color=colors[::-1])
for i, n in enumerate(data):
format_spec = "{:%s}" % data_format
axes.text(
n,
i,
format_spec.format(n),
color="black",
style={"text-anchor": "start",
"-toyplot-anchor-shift": "5px"})
def compute_header_shape(spec, ignore_root=True):
shape = {"hrows": 0, "hcols": 0}
def visit(spec, curr_level=0):
if "subgroups" in spec:
for s in spec["subgroups"]:
visit(s, curr_level + 1)
else:
assert "type" in spec
assert "data" in spec
if "colspan" in spec:
shape["hcols"] += spec["colspan"]
else:
shape["hcols"] += 1
shape["hrows"] = max(shape["hrows"], curr_level + 1)
visit(spec)
if ignore_root:
shape["hrows"] -= 1
return (shape["hrows"], shape["hcols"])
def update_colspans(spec, ignore_root=True):
hrows, _ = compute_header_shape(spec, ignore_root)
def update_(spec, curr_level=0):
if "subgroups" in spec:
next_level = curr_level + 1
if ignore_root and curr_level == 0:
next_level = curr_level
for s in spec["subgroups"]:
update_(s, next_level)
if "colspan" not in spec:
spec["colspan"] = sum(x["colspan"] for x in spec["subgroups"])
if "rowspan" not in spec:
spec["rowspan"] = 1
else:
if "colspan" not in spec:
spec["colspan"] = 1
if "rowspan" not in spec:
spec["rowspan"] = hrows - curr_level
update_(spec)
def create_table(canvas, spec, data, ignore_root=True):
hrows, hcols = compute_header_shape(spec, ignore_root)
table = canvas.table(rows=len(data), columns=hcols, trows=hrows)
colid = {"value": 0}
colwidths = {}
def compute_column_widths(spec):
if "subgroups" in spec:
for s in spec["subgroups"]:
compute_column_widths(s)
else:
colspan = 1
if "colspan" in spec:
colspan = spec["colspan"]
if "width" in spec:
for i in range(colspan):
colwidths[colid["value"] + i] = spec["width"]
colid["value"] += colspan
compute_column_widths(spec)
for i in range(hcols):
if i in colwidths:
table.column(i).width = colwidths[i]
return table
def draw_grid(table):
table.header.grid.style = {"stroke": "white"}
table.grid.hlines[...] = "single"
table.grid.vlines[...] = "single"
table.grid.style = {"stroke": "gainsboro", "stroke-width": 1}
for i in range(table.header.rows):
table.header.row(i).height = 30
def draw_header(table, spec, colormap, ignore_root=True):
def draw_(spec, row_start=0, col_start=0, curr_level=0):
if not ignore_root or curr_level > 0:
nrows = spec["rowspan"]
ncols = spec["colspan"]
merged = table.header.cell(
row_start, col_start, rowspan=nrows, colspan=ncols).merge()
merged.data = spec["label"]
merged.align = "center"
merged.valign = "center"
merged.lstyle = {
"font-size": "20px",
"font-weight": "bold",
"fill": "white"
}
merged.bstyle = {"fill": "darkmagenta", "stroke": "none"}
row_start += 1
if "subgroups" in spec:
for s in spec["subgroups"]:
draw_(s, row_start, col_start, curr_level + 1)
col_start += s["colspan"]
draw_(spec)
def draw_body(table, spec, data, colormap):
colors = {"value": None}
def draw_(spec, col_start=0):
if "subgroups" in spec:
for s in spec["subgroups"]:
draw_(s, col_start)
col_start += s["colspan"]
else:
tp = spec["type"]
if not colors["value"]:
assert tp == "tree"
if tp == "tree":
assert (len(spec["data"]) == 3)
tree_data = data[spec["data"]]
tree_data.columns = ["id", "level", "name"]
colors["value"] = compute_color(colormap, tree_data)
axes = table.body.column[col_start].cartesian()
theme = spec.get("theme", "light")
draw_tree(axes, tree_data, colors["value"], theme=theme)
elif tp == "percent":
axes = table.body.column[col_start].cartesian()
column_data = data[spec["data"]]
data_format = spec.get("format", ".1f")
masks_expr = spec.get("mask", None)
masks = None
if masks_expr:
masks = eval(masks_expr)
draw_percent(axes, column_data, masks, colors["value"],
data_format)
elif tp == "bars":
axes = table.body.column[col_start].cartesian()
column_data = data[spec["data"]]
data_format = spec.get("format", ".1f")
draw_bars(axes, column_data, colors["value"], data_format)
elif tp == "matrix":
column_data = data[spec["data"]]
masks_expr = spec.get("mask", None)
if masks_expr:
masks = eval(masks_expr)
column_data = column_data.where(masks)
if "format" in spec:
formatter = toyplot.format.FloatFormatter(
spec["format"], nanshow=False)
else:
formatter = toyplot.format.FloatFormatter(nanshow=False)
for i, n in enumerate(spec["data"]):
table.body.column[col_start + i].data = column_data[n]
table.body.column[col_start + i].format = formatter
elif tp == "raw":
column_data = data[spec["data"]]
masks_expr = spec.get("mask", None)
if masks_expr:
masks = eval(masks_expr)
column_data = column_data.where(masks)
table.body.column[col_start].data = column_data
if "format" in spec:
formatter = toyplot.format.FloatFormatter(
spec["format"], nanshow=False)
else:
formatter = toyplot.format.FloatFormatter(nanshow=False)
table.body.column[col_start].format = formatter
else:
raise ValueError("Unknown drawing type: '%s'" % tp)
draw_(spec)
def draw_table(canvas, spec, data, colormap="Set1", ignore_root=True):
spec = spec.copy()
update_colspans(spec)
colormap = toyplot.color.brewer.map(colormap)
table = create_table(canvas, spec, data, ignore_root)
draw_grid(table)
draw_header(table, spec, colormap, ignore_root)
draw_body(table, spec, data, colormap)
def view_data(ref_db,
spec_file,
sql=None,
width=800,
height=600,
colormap="Set1",
no_ignore_root=False,
save=None):
conn = sqlite3.connect(ref_db)
real_sql = "SELECT * FROM result ORDER BY abs_seq"
if sql:
real_sql = "SELECT * FROM ({}) ORDER BY abs_seq".format(sql)
data = pandas.read_sql_query(real_sql, conn)
    spec_text = open(spec_file).read()
if os.path.splitext(spec_file)[-1] == ".json":
spec_text = re.sub(r"//.*", "", spec_text)
spec = loads(spec_text)
canvas = toyplot.canvas.Canvas(width, height)
draw_table(canvas, spec, data, colormap, not no_ignore_root)
if save:
_, ext = os.path.splitext(save)
if ext == ".png":
toyplot.png.render(canvas, save)
elif ext == ".pdf":
toyplot.pdf.render(canvas, save)
elif ext == ".svg":
toyplot.svg.render(canvas, save)
else:
raise ValueError("Unknown save file type: '%s'" % ext)
toyplot.browser.show(canvas, spec["label"])
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("ref_db", help="Database containing analyser results")
parser.add_argument(
"spec_file", help="Json file specifying the visualization")
parser.add_argument(
"--sql", default=None, help="Sql query to extract a subset of data")
parser.add_argument(
"--width", default=800, type=int, help="Canvas width, in px")
parser.add_argument(
"--height", default=600, type=int, help="Canvas height, in px")
parser.add_argument(
"--colormap",
default="Set1",
help="Color Brewer colormap to use (default: Set1)")
parser.add_argument(
"--no-ignore-root",
action="store_true",
help="Draw root label on the canvas")
parser.add_argument("--save", default=None, help="Save canvas to file")
args = parser.parse_args()
view_data(**vars(args))
if __name__ == "__main__":
main()
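# --- Illustrative spec sketch (added for clarity; not part of the original) ---
# The JSON spec consumed by view_data() is a tree of groups whose leaves name a
# drawing "type" ("tree", "percent", "bars", "matrix" or "raw") and the data
# column(s) to plot; the leftmost leaf must be a "tree" column, since it fixes
# the row colors. All labels and column names below are hypothetical:
#
# {
#   "label": "Results",
#   "subgroups": [
#     {"label": "Hierarchy", "type": "tree",
#      "data": ["id", "level", "name"], "width": 250},
#     {"label": "Coverage", "type": "percent", "data": "coverage",
#      "format": ".1f"},
#     {"label": "Counts", "type": "matrix", "colspan": 2,
#      "data": ["hits", "misses"]}
#   ]
# }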
|
mit
|
mwaddoups/deaqon
|
models.py
|
1
|
3221
|
import numpy as np
from sklearn.linear_model import SGDRegressor, SGDClassifier
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from keras.optimizers import Adam
class BaseModel:
def init(self, n_obs, n_act):
pass
def predict(self, data):
pass
def fit(self, X, y):
pass
class SGDRegModel(BaseModel):
def __init__(self, **kwargs):
self.models = []
self.model_kwargs = kwargs
def init(self, n_obs, n_act):
self.models = []
for i in xrange(n_act):
model = SGDRegressor(**self.model_kwargs)
model.partial_fit(np.random.rand(1, n_obs),
np.random.rand(1))
self.models.append(model)
def predict(self, data):
predictions = np.zeros((data.shape[0], len(self.models)))
for i, model in enumerate(self.models):
predictions[:, i] = model.predict(data)
return predictions
def fit(self, X, y):
for i, model in enumerate(self.models):
model.partial_fit(X, y[:, i].ravel())
class SGDClfModel(SGDRegModel):
def init(self, n_obs, n_act):
self.models = []
for i in xrange(n_act):
model = SGDClassifier(**self.model_kwargs)
model.partial_fit(np.random.rand(1, n_obs), [0], classes=[0, 1])
self.models.append(model)
def predict(self, data):
predictions = np.zeros((data.shape[0], len(self.models)))
for i, model in enumerate(self.models):
predictions[:, i] = model.predict_proba(data)[:, 1]
predictions /= np.sum(predictions, axis=1).reshape(-1, 1)
return predictions
class NNRegModel(BaseModel):
def __init__(self, hidden_layers, optimizer=None):
if optimizer is None:
optimizer = Adam()
self.hidden_layers = hidden_layers
self.optimizer = optimizer
def init(self, n_obs, n_act):
inputs = Input(shape=(n_obs,))
hidden = inputs
for num_nodes, dropout in self.hidden_layers:
hidden = Dense(num_nodes, activation='relu')(hidden)
if dropout > 0:
hidden = Dropout(dropout)(hidden)
outputs = Dense(n_act, activation='linear')(hidden)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='mse', optimizer=self.optimizer)
self.model = model
def predict(self, data):
return self.model.predict(data, verbose=0)
def fit(self, X, y):
self.model.train_on_batch(X, y)
class NNClfModel(NNRegModel):
def init(self, n_obs, n_act):
inputs = Input(shape=(n_obs,))
hidden = inputs
for num_nodes, dropout in self.hidden_layers:
hidden = Dense(num_nodes, activation='relu')(hidden)
if dropout > 0:
hidden = Dropout(dropout)(hidden)
outputs = Dense(n_act, activation='softmax')(hidden)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer=self.optimizer)
self.model = model
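# --- Usage sketch (illustrative, not part of the original module) ---
# All models share the same init/predict/fit interface; a minimal example with
# made-up dimensions (4 observation features, 2 actions) and random data:
if __name__ == '__main__':
    demo = SGDRegModel()
    demo.init(n_obs=4, n_act=2)
    X_demo = np.random.rand(8, 4)
    y_demo = np.random.rand(8, 2)
    demo.fit(X_demo, y_demo)
    print(demo.predict(X_demo).shape)  # expected: (8, 2)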
|
mit
|
h2educ/scikit-learn
|
examples/calibration/plot_calibration_curve.py
|
225
|
5903
|
"""
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
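            # (added note) decision_function scores are not probabilities, so
            # the min-max scaling above maps them to [0, 1] before the Brier
            # score and the reliability curve are computed.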
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
|
bsd-3-clause
|
adpozuelo/Master
|
RC/PEC2/ws.py
|
1
|
2665
|
## RC - UOC - URV - PEC2
## [email protected]
## Watts-Strogatz (WS)
## run with 'python3 ws.py'
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import random
import math
random.seed(1)
def create_network(n, p, k):
G = nx.Graph()
kd2 = int(k / 2) + 1
for ni in range(1, n + 1):
G.add_node(ni)
for nj in range(ni + 1, ni + kd2):
if nj > n:
nj %= n
G.add_edge(ni, nj)
for ni in range(1, n + 1):
for nj in range(ni + 1, ni + kd2):
if random.uniform(0, 1) < p:
if nj > n:
nj %= n
G.remove_edge(ni, nj)
rn = random.randint(1, n)
while G.has_edge(ni, rn) or rn == ni:
rn = random.randint(1, n)
G.add_edge(ni, rn)
if n >= 1000:
nx.draw_networkx(G, node_size=4, with_labels=False)
else:
nx.draw_networkx(G, nx.circular_layout(G), node_size=4, with_labels=False)
plt.title('n = ' + str(n) + ', p = ' + str(p) + ', k = ' + str(k))
filename = 'ws_n' + str(n) + '_p' + str(p) + '_k' + str(k) + '_net.png'
plt.savefig(filename)
#plt.show()
plt.clf()
histo = nx.degree_histogram(G)
total = sum(histo)
norm_histo = np.divide(histo, total)
length = len(norm_histo)
kn = np.arange(length)
plt.plot(kn, norm_histo, 'r-', label = 'empirical')
kd2 -= 1
diracdelta = np.empty(length)
# https://en.wikipedia.org/wiki/Watts%E2%80%93Strogatz_model
for ki in range(0, length):
if ki >= kd2:
sumatory = np.empty(min(ki - kd2, kd2) + 1)
for ndi in range(0, len(sumatory)):
sumatory[ndi] = (math.factorial(kd2) / (math.factorial(ndi) * math.factorial( kd2 - ndi))) * ((1 - p) ** ndi) * (p ** (kd2 - ndi)) * ((p * kd2) ** (ki - kd2 - ndi)) * math.exp(-p * kd2) / math.factorial(ki - kd2 - ndi)
diracdelta[ki] = sum(sumatory)
else:
diracdelta[ki] = 0.0
plt.plot(kn, diracdelta, 'b-', label = 'dirac delta')
plt.title('n = ' + str(n) + ', p = ' + str(p) + ', k = ' + str(k))
    plt.xlabel('Degree k')
    plt.ylabel('Fraction of nodes')
plt.legend(loc = 1)
filename = 'ws_n' + str(n) + '_p' + str(p) + '_k' + str(k) + '_dg.png'
plt.savefig(filename)
#plt.show()
plt.clf()
return
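# --- Optional sanity check (illustrative sketch, not part of the original) ---
# The analytic Watts-Strogatz degree distribution used in create_network should
# sum to ~1 over a long enough degree range; it is restated here so it can be
# checked in isolation, e.g. sum(ws_degree_pmf(ki, 8, 0.2) for ki in range(200)).
def ws_degree_pmf(ki, k, p):
    kd2 = k // 2
    if ki < kd2:
        return 0.0
    total = 0.0
    for ndi in range(0, min(ki - kd2, kd2) + 1):
        total += ((math.factorial(kd2)
                   / (math.factorial(ndi) * math.factorial(kd2 - ndi)))
                  * ((1 - p) ** ndi) * (p ** (kd2 - ndi))
                  * ((p * kd2) ** (ki - kd2 - ndi))
                  * math.exp(-p * kd2)
                  / math.factorial(ki - kd2 - ndi))
    return total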
n = [50, 100, 1000, 10000]
p = [0.0, 0.1, 0.2, 0.5, 0.9, 1.0]
k = [4, 8, 16, 24]
for ni in n:
for pi in p:
for ki in k:
create_network(ni, pi, ki)
|
gpl-3.0
|
t-davidson/command-line-grading
|
embedding.py
|
1
|
4306
|
"""File to test out neural embeddings"""
import pickle
import numpy as np
import pandas as pd
import string
from nltk.util import ngrams
from gensim import utils
from gensim.models import doc2vec
from sklearn.linear_model import LogisticRegression
from random import seed
from random import shuffle
from paper_classifier import *
from sklearn import linear_model
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_selection import SelectFromModel, SelectKBest, chi2
#Tag doc2vec.TaggedDocument(bow, [count])
#Access model.docvecs[count]
def clean_text(text):
text = ''.join(ch for ch in text if ch not in string.punctuation)
return text.lower()
def add_bigrams(tokens):
bigrams=ngrams(tokens,2)
for pair in bigrams:
bigram = pair[0]+' '+pair[1]
tokens.append(bigram)
return tokens
def doc_iterator(df):
"""Parses text documents from the essay field of the
dataframe, cleans text, tokenizes, and returns it
as an iterator"""
for i in range(0, df.shape[0]):
yield clean_text(df.essay.iloc[i]).split()
#tokens = clean_text(df.essay.iloc[i]).split()
#tokens = add_bigrams(tokens)
#yield tokens
###Runs out of memory if bigrams included!
def tagged_iterator(text_iterator):
"""Processes texts in the doc_iterator and returns
an iterator of tagged documents"""
count=0
for bow in text_iterator:
if len(bow) > 0:
yield doc2vec.TaggedDocument(bow, [count])
count += 1
print count-1
def docs_shuffle(iterator):
"""Shuffles the iterator"""
list_of_docs = []
for i in iterator:
list_of_docs.append(i)
shuffle(list_of_docs)
for d in list_of_docs:
yield d
def build_X(df, model, size):
X = np.zeros((df.shape[0], size))
for i in range(0, df.shape[0]):
col = model.docvecs[i]
X[i] = col
return pd.DataFrame(X)
def LogisticRegWithSelection(X, y, threshold):
#First model to select best features
model = linear_model.LogisticRegression(C=1.0, penalty='l1',
class_weight='balanced')
kfold(X, y, model, 5, False)
#y_pred = model.predict(X)
#print y_pred
SFM = SelectFromModel(model, prefit=True,threshold=threshold)
X_new = SFM.transform(X)
#Second model to run on reduced feature set
model2 = linear_model.LogisticRegression(C=1.0, penalty='l2',
class_weight='balanced')
kfold(pd.DataFrame(X_new), y, model2, 5, False)
y_pred = model2.predict(X_new)
print y_pred
def LogisticRegWithOVR(X, y):
###Performs badly with high dim vector
model = linear_model.LogisticRegression(C=10.0, penalty='l1',
class_weight='balanced')
P = OneVsRestClassifier(model)
kfold(X, y, P, 5, True)
#y_pred = P.predict(X)
#print y_pred
if __name__ == '__main__':
df = pickle.load(open('week4_model_table.p', 'rb'))
df = df[df.essay != ''] #these conditions filter essays w/o content
df = df[df.essay != ' ']
df = df[df.grade != 70] #A mislabelled entry
df = df[df.grade != 0] #Remove zero entries
df = df[df.grade != 66] ##Remove ungraded (for now)
print df.shape
docs = doc_iterator(df)
tagged = tagged_iterator(docs)
#tagged = docs_shuffle(tagged) #shuffle order of tagged
size=10000
####Odd, when I don't do feature selection I get junk if size > 100
###but when I do feature selection I get better results with larger size
model = doc2vec.Doc2Vec(
tagged,
size=size,
min_count=3,
workers=4,
iter=20,
)
#for epoch in range(100): ###This appears to make no difference
# seed(randint(0,100))
# tagged = docs_shuffle(tagged)
# model.train(tagged)
print model.most_similar('heat')
model.save('doc2vecmodel')
#print model.docvecs[767]
#model.build_vocab(tagged) #I think my code does this by including tagged in model spec
#Running multiclass classifier
X = build_X(df, model, size)
section = df.section
X.section = section
y = df.grade
LogisticRegWithOVR(X, y)
#Single class
y = df.excellent
LogisticRegWithSelection(X, y, 'mean')
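    # --- Illustrative extension (not in the original script) ---
    # gensim's Doc2Vec can also embed unseen text via infer_vector; a hedged
    # sketch, with behaviour depending on the installed gensim version:
    new_vec = model.infer_vector(clean_text('heat transfer essay').split())
    print new_vec[:5]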
|
gpl-3.0
|
OshynSong/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
59
|
76336
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
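    # (added note) alpha_max is the smallest penalty for which the solution is
    # identically zero; the grid below is therefore log-spaced between
    # alpha_max * eps and alpha_max.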
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
    pre_fit = 'pre_fit' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
            # No need to normalize or fit intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
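        # (added note) the Cython solvers minimize an objective whose squared
        # loss is not divided by n_samples, so alpha is rescaled by n_samples
        # here to match the documented (1 / (2 * n_samples)) * ||y - Xw||^2_2
        # formulation.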
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
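# Hedged usage sketch (added for illustration, not part of the original
# module): LassoCV fits a lasso path over ``n_alphas`` values of alpha on each
# CV fold and keeps the alpha with the lowest mean squared error across folds.
# The data below is synthetic and purely illustrative.
def _lasso_cv_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    # Fortran-ordered X avoids the memory duplication mentioned in the Notes.
    X = np.asfortranarray(rng.randn(60, 8))
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(60)
    reg = LassoCV(cv=3).fit(X, y)
    return reg.alpha_, reg.coef_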
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
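# Hedged sketch (added for illustration): the Notes above state that a penalty
# written as ``a * L1 + b * L2`` corresponds to ``alpha = a + b`` and
# ``l1_ratio = a / (a + b)``.  This tiny helper just restates that mapping.
def _l1_l2_to_enet_params(a, b):
    """Map separate L1 (a) and L2 (b) strengths to (alpha, l1_ratio)."""
    alpha = a + b
    l1_ratio = a / float(a + b)
    return alpha, l1_ratio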
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
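# Hedged illustration (added, not part of the original module): the
# ``||W||_21`` term in the objective above sums the Euclidean norms of the
# rows of W, so a whole row is either kept or driven to zero, which is what
# produces the joint (grouped) sparsity across tasks.
def _l21_mixed_norm(W):
    """Return sum_i sqrt(sum_j W[i, j] ** 2) for a 2-D coefficient array."""
    import numpy as np
    W = np.asarray(W)
    return np.sqrt((W ** 2).sum(axis=1)).sum()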
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
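# Hedged usage sketch (added for illustration): with two related targets,
# MultiTaskLasso selects features jointly, so columns of ``coef_`` that are
# entirely zero correspond to features dropped for every task.  The toy data
# below is synthetic and illustrative only.
def _multi_task_lasso_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 6)
    Y = np.column_stack([X[:, 0] + 0.05 * rng.randn(40),
                         2 * X[:, 0] + 0.05 * rng.randn(40)])
    clf = MultiTaskLasso(alpha=0.1).fit(X, Y)
    return clf.coef_  # shape (n_tasks, n_features) = (2, 6)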
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
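# Hedged usage sketch (added for illustration): when ``l1_ratio`` is a list,
# the CV grid stored in ``mse_path_`` has shape
# (n_l1_ratio, n_alphas, n_folds) and the best (l1_ratio_, alpha_) pair is
# retained.  The data below is synthetic and illustrative only.
def _multi_task_enet_cv_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    Y = np.dot(X[:, :2], rng.randn(2, 3)) + 0.05 * rng.randn(30, 3)
    clf = MultiTaskElasticNetCV(l1_ratio=[.1, .5, .9], cv=3).fit(X, Y)
    return clf.l1_ratio_, clf.alpha_, clf.mse_path_.shape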
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
|
bsd-3-clause
|
se4u/pylearn2
|
pylearn2/train_extensions/plots.py
|
34
|
9617
|
"""
Plot monitoring extensions while training.
"""
__authors__ = "Laurent Dinh"
__copyright__ = "Copyright 2014, Universite de Montreal"
__credits__ = ["Laurent Dinh"]
__license__ = "3-clause BSD"
__maintainer__ = "Laurent Dinh"
__email__ = "dinhlaur@iro"
import logging
import os
import os.path
import stat
import numpy
np = numpy
from pylearn2.train_extensions import TrainExtension
from theano.compat.six.moves import xrange
from pylearn2.utils import as_floatX, wraps
if os.getenv('DISPLAY') is None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import warnings
log = logging.getLogger(__name__)
def make_readable(fn):
"""
Make a file readable by all.
Practical when the plot is in your public_html.
Parameters
----------
fn : str
Filename you wish to make public readable.
"""
st = os.stat(fn)
# Create the desired permission
st_mode = st.st_mode
read_all = stat.S_IRUSR
read_all |= stat.S_IRGRP
read_all |= stat.S_IROTH
# Set the permission
os.chmod(fn, st_mode | read_all)
def get_best_layout(n_plots):
"""
Find the best basic layout for a given number of plots.
Find the integer rectangle with the smallest perimeter whose area is
at least ``n_plots``.
Parameters
----------
n_plots : int
The number of plots to display
Returns
-------
n_rows : int
Number of rows in the layout
n_cols : int
Number of columns in the layout
"""
assert n_plots > 0
# Initialize the layout
n_rows = 1
n_cols = np.ceil(n_plots*1./n_rows)
n_cols = int(n_cols)
half_perimeter = n_cols + 1
# Limit the range of possible layouts
max_row = np.sqrt(n_plots)
max_row = np.round(max_row)
max_row = int(max_row)
for l in xrange(1, max_row + 1):
width = np.ceil(n_plots*1./l)
width = int(width)
if half_perimeter >= (width + l):
n_rows = l
n_cols = np.ceil(n_plots*1./n_rows)
n_cols = int(n_cols)
half_perimeter = n_rows + n_cols
return n_rows, n_cols
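# Hedged illustration (added, not part of the original module): with the
# greedy search above, for example, 5 plots end up on a 2 x 3 grid and
# 7 plots on a 3 x 3 grid (the smallest half-perimeter with enough cells).
def _get_best_layout_examples():
    """Return sample layouts, e.g. {1: (1, 1), 5: (2, 3), 7: (3, 3)}."""
    return dict((n, get_best_layout(n)) for n in (1, 5, 7))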
def create_colors(n_colors):
"""
Create an array of n_colors
Parameters
----------
n_colors : int
The number of colors to create
Returns
-------
colors_rgb : np.array
An array of shape (n_colors, 3) in RGB format
"""
# Create the list of color hue
colors_hue = np.arange(n_colors)
colors_hue = as_floatX(colors_hue)
colors_hue *= 1./n_colors
# Set the color in HSV format
colors_hsv = np.ones((n_colors, 3))
colors_hsv[:, 2] *= .75
colors_hsv[:, 0] = colors_hue
# Put in a matplotlib-friendly format
colors_hsv = colors_hsv.reshape((1, )+colors_hsv.shape)
# Convert to RGB
colors_rgb = matplotlib.colors.hsv_to_rgb(colors_hsv)
colors_rgb = colors_rgb[0]
return colors_rgb
class Plotter(object):
"""
Base class for plotting.
Parameters
----------
freq : int, optional
The number of epochs to wait before producing the plot.
Default is None (set by the PlotManager).
"""
def __init__(self, freq=None):
self.filenames = []
self.freq = freq
def setup(self, model, dataset, algorithm):
"""
Setup the plotters.
Parameters
----------
model : pylearn2.models.Model
The model trained
dataset : pylearn2.datasets.Dataset
The dataset on which the model is trained
algorithm : pylearn2.training_algorithms.TrainingAlgorithm
The algorithm the model is trained with
"""
raise NotImplementedError(str(type(self))+" does not implement setup.")
def plot(self):
"""
The method that draws and saves the desired figure, which depends
on the object and its attributes. This method is called by the
PlotManager object as often as the `freq` attribute specifies.
"""
raise NotImplementedError(str(type(self))+" does not implement plot.")
def set_permissions(self, public):
"""
Make the produced files readable by everyone.
Parameters
----------
public : bool
If public is True, then the associated files are
readable by everyone.
"""
if public:
for filename in self.filenames:
make_readable(filename)
class Plots(Plotter):
"""
Plot different monitors.
Parameters
----------
channel_names : list of str
List of monitor channels to plot
save_path : str
Filename of the plot file
share : float, optional
The percentage of epochs shown. Default is .8 (80%)
per_second : bool, optional
Set if the x-axis is in seconds, in epochs otherwise.
Default is False.
kwargs : dict
Passed on to the superclass.
"""
def __init__(self, channel_names,
save_path, share=.8,
per_second=False,
** kwargs):
super(Plots, self).__init__(** kwargs)
if not save_path.endswith('.png'):
save_path += '.png'
self.save_path = save_path
self.filenames = [self.save_path]
self.channel_names = channel_names
self.n_colors = len(self.channel_names)
self.colors_rgb = create_colors(self.n_colors)
self.share = share
self.per_second = per_second
@wraps(Plotter.setup)
def setup(self, model, dataset, algorithm):
self.model = model
@wraps(Plotter.plot)
def plot(self):
monitor = self.model.monitor
channels = monitor.channels
channel_names = self.channel_names
# Accumulate the plots
plots = np.array(channels[channel_names[0]].val_record)
plots = plots.reshape((1, plots.shape[0]))
plots = plots.repeat(self.n_colors, axis=0)
for i, channel_name in enumerate(channel_names[1:]):
plots[i+1] = np.array(channels[channel_name].val_record)
# Keep the relevant part
n_min = plots.shape[1]
n_min -= int(np.ceil(plots.shape[1] * self.share))
plots = plots[:, n_min:]
# Get the x axis
x = np.arange(plots.shape[1])
x += n_min
# Put in seconds if needed
if self.per_second:
seconds = channels['training_seconds_this_epoch'].val_record
seconds = np.array(seconds)
seconds = seconds.cumsum()
x = seconds[x]
# Plot the quantities
plt.figure()
for i in xrange(self.n_colors):
plt.plot(x, plots[i], color=self.colors_rgb[i],
alpha=.5)
plt.legend(self.channel_names)
plt.xlim(x[0], x[-1])
plt.ylim(plots.min(), plots.max())
plt.axis('on')
plt.savefig(self.save_path)
plt.close()
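# Hedged usage sketch (added for illustration): a Plots extension that graphs
# two monitor channels every 5 epochs.  The channel names below are
# hypothetical and depend on the monitor of the model actually trained.
def _example_plots_extension():
    return Plots(channel_names=['train_objective', 'valid_objective'],
                 save_path='monitor.png', share=.8, freq=5)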
class PlotManager(TrainExtension):
"""
Class to manage the Plotter classes.
Parameters
----------
plots : list of pylearn2.train_extensions.Plotter
List of plots to make during training
freq : int
The default number of epochs to wait before producing a plot.
public : bool
Whether the files are made public or not. Default is true.
html_path : str
The path where the HTML page is saved. The associated files should be
in the same folder. Default is None, in which case no HTML page is produced.
"""
def __init__(self, plots, freq, public=True, html_path=None):
self.plots = plots
self.freq = freq
# Set a default freq
for plot in self.plots:
if plot.freq is None:
plot.freq = self.freq
self.public = public
self.html_path = html_path
self.filenames = []
self.count = 0
@wraps(TrainExtension.setup)
def setup(self, model, dataset, algorithm):
for plot in self.plots:
plot.setup(model, dataset, algorithm)
for filename in plot.filenames:
warn = ("/home/www-etud/" in filename)
warn |= (os.environ['HOME'] in filename)
warn &= ('umontreal' in os.environ['HOSTNAME'])
if warn:
warnings.warn('YOU MIGHT RUIN THE NFS '
'BY SAVING IN THIS PATH!')
self.filenames.append(filename)
if self.html_path is not None:
header = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<html xmlns="http://www.w3.org/1999/xhtml"'
'xml:lang="en">\n'
'\t<body>\n')
footer = ('\t</body>\n'
'</html>')
body = ''
for filename in self.filenames:
basename = os.path.basename(filename)
body += '<img src = "' + basename + '"><br/>\n'
with open(self.html_path, 'w') as f:
f.write(header + body + footer)
f.close()
if self.public:
make_readable(self.html_path)
@wraps(TrainExtension.on_monitor)
def on_monitor(self, model, dataset, algorithm):
self.count += 1
for plot in self.plots:
if self.count % plot.freq == 0:
try:
plot.plot()
plot.set_permissions(self.public)
except Exception as e:
warnings.warn(str(plot) + ' has failed.\n'
+ str(e))
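# Hedged usage sketch (added for illustration): a PlotManager bundling one
# Plots extension, refreshing every 10 epochs and writing an index page next
# to the images.  Paths and channel names are illustrative only.
def _example_plot_manager():
    plots = [Plots(channel_names=['train_objective'], save_path='obj.png')]
    return PlotManager(plots, freq=10, public=True, html_path='index.html')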
|
bsd-3-clause
|
yunfeilu/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
48
|
47506
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
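# Hedged note (added for illustration): for a 0/1 target where a node holds a
# fraction ``p`` of positives, the variance used by "mse" is p * (1 - p) while
# the gini impurity is 2 * p * (1 - p).  Being proportional, the two criteria
# rank candidate splits identically, which is what the test above relies on.
def _gini_vs_variance(p):
    """Return (variance, gini) for a Bernoulli(p) node; gini == 2 * variance."""
    return p * (1.0 - p), 2.0 * p * (1.0 - p)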
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
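# Hedged note (added for illustration): the check above enforces that every
# leaf carries at least ``min_weight_fraction_leaf * total_weight`` of sample
# weight; e.g. 200 unit-weight samples with a fraction of 0.1 forbid leaves
# holding less than 20.0 of weight.
def _min_leaf_weight(total_weight, frac):
    """Smallest admissible total sample weight in a leaf."""
    return total_weight * frac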
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimator are pickable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
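def _effective_weight_sketch():
    # Hedged illustration, not part of the original test suite: the
    # multiplicative check above amounts to scaling each sample weight by
    # the class weight of its label. All values below are made up.
    y_demo = np.array([0, 0, 1, 2])
    sample_weight_demo = np.array([1., 2., 1., 1.])
    class_weight_demo = {0: 1., 1: 100., 2: 1.}
    effective = sample_weight_demo * np.array(
        [class_weight_demo[label] for label in y_demo])
    assert_array_equal(effective, np.array([1., 2., 100., 1.]))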
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error raised for too-large inputs mentions the float32 limit.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample to keep the test fast
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of the csr and csc matrices.
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
|
bsd-3-clause
|
vigilv/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large dataset
in chunks. We load one image at a time and randomly extract 50 patches
from it. Once 500 patches have accumulated (from 10 images), we run the
`partial_fit` method of the online KMeans object, MiniBatchKMeans.
The verbose setting on MiniBatchKMeans shows that some clusters are
reassigned during the successive calls to `partial_fit`: when the number
of patches a cluster represents becomes too low, it is better to pick a
new random cluster center.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
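def _partial_fit_sketch():
    # Hedged sketch, not part of the original example: the same chunked
    # training pattern on synthetic data, isolating the partial_fit call
    # described in the module docstring. The shapes below are arbitrary
    # stand-ins for buffers of flattened patches.
    rng_demo = np.random.RandomState(42)
    km = MiniBatchKMeans(n_clusters=5, random_state=rng_demo)
    for _ in range(20):
        chunk = rng_demo.randn(500, 10)
        km.partial_fit(chunk)
    return km.cluster_centers_.shape  # -> (5, 10)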
|
bsd-3-clause
|
mattcaldwell/zipline
|
zipline/utils/cli.py
|
4
|
6275
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
import zipline
DEFAULTS = {
'start': '2012-01-01',
'end': '2012-12-31',
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL'
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
    * ipython_mode : bool <default=False>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', choices=('yahoo',))
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
    return vars(args)
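def _parse_args_sketch():
    # Hedged usage sketch, not part of the original CLI: values supplied on
    # the command line override the corresponding DEFAULTS entries, and
    # anything not supplied keeps its default (e.g. capital_base below).
    parsed = parse_args(['--start', '2013-01-01', '--symbols', 'AAPL,MSFT'])
    return parsed['start'], parsed['symbols'], parsed['capital_base']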
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
    1. Load data (start and end dates can be provided as strings, as
       can the source and symbols).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
    algofile is supplied, will also look for a matching
    <algofile>_analyze.py and append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = pd.Timestamp(kwargs['start'], tz='UTC')
end = pd.Timestamp(kwargs['end'], tz='UTC')
symbols = kwargs['symbols'].split(',')
if kwargs['source'] == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
analyze_fname = os.path.splitext(algo_fname)[0] + '_analyze.py'
if os.path.exists(analyze_fname):
with open(analyze_fname, 'r') as fd:
# Simply append
algo_text += fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']),
algo_filename=kwargs.get('algofile'))
perf = algo.run(source)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
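def _run_pipeline_sketch():
    # Hedged sketch of how parse_args() feeds run_pipeline(); 'my_algo.py'
    # is a hypothetical algorithm file containing initialize() and
    # handle_data(). The call itself is left commented out because the
    # 'yahoo' source needs network access to load data.
    demo_config = parse_args(['-f', 'my_algo.py',
                              '--start', '2012-01-01',
                              '--end', '2012-03-31'])
    # return run_pipeline(print_algo=True, **demo_config)
    return demo_config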
|
apache-2.0
|
allrod5/extra-trees
|
benchmarks/classification/execution_time.py
|
1
|
1701
|
# From MCZA015-13 class notes, modified by Felipe Anchieta Santos Costa
# License: BSD Style.
import time
import numpy as np
from sklearn.datasets import load_breast_cancer, load_iris, load_wine
from sklearn.ensemble import ExtraTreesClassifier as SKExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from extra_trees.ensemble.forest import ExtraTreesClassifier
classification_models = [
('SVC', SVC()),
('RandomForest', RandomForestClassifier()),
('ExtraTrees (SciKit)', SKExtraTreesClassifier()),
('ExtraTrees', ExtraTreesClassifier()),
]
classification_data_sets = [
('breast_cancer', load_breast_cancer(return_X_y=True)),
('iris', load_iris(return_X_y=True)),
('wine', load_wine(return_X_y=True)),
]
for data_name, data_set in classification_data_sets:
print("{}\n".format(data_name) + '*' * len(data_name))
X, y = data_set
train_size = (len(X) // 4) * 3 # ~75% for training
test_size = len(X) - train_size # ~25% for testing
    # shuffle the sample indices to form a random ~75/25 train/test split
fx = np.arange(len(X))
np.random.shuffle(fx)
for name, model in classification_models:
times = []
print("model: {}\n=======".format(name) + "=" * len(name))
train = X[fx[0:train_size],:]
test = X[fx[train_size:],:]
for _ in range(10):
start = time.time()
model.fit(train, y[fx[0:train_size]])
model.predict(test)
model.score(test, y[fx[train_size:]])
end = time.time()
times.append(end - start)
print("time mean={}".format(np.mean(times)))
print("time stdev={}\n".format(np.std(times)))
|
mit
|
tomlof/scikit-learn
|
examples/decomposition/plot_faces_decomposition.py
|
42
|
4843
|
"""
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) to the :ref:`olivetti_faces` dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators and whether to center the data before
# fitting; transformers that expose ``cluster_centers_`` instead of
# ``components_`` are handled below when plotting.
estimators = [
('Eigenfaces - PCA using randomized SVD',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
# Plot an image representing the pixelwise variance provided by the
    # estimator, e.g. its noise_variance_ attribute. The Eigenfaces estimator,
# via the PCA decomposition, also provides a scalar noise_variance_
# (the mean of pixelwise variance) that cannot be displayed as an image
# so we skip it.
if (hasattr(estimator, 'noise_variance_') and
estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
|
bsd-3-clause
|
jamestwebber/scipy
|
scipy/interpolate/fitpack.py
|
2
|
25621
|
from __future__ import print_function, division, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from ._fitpack_impl import bisplrep, bisplev, dblint
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-D curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-D space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        ``nest=m+k+1`` is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
    The number of coefficients in the `c` array is ``k+1`` less than the number
of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
the array of coefficients to have the same length as the array of knots.
These additional coefficients are ignored by evaluation routines, `splev`
and `BSpline`.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Generate a discretization of a limacon curve in the polar coordinates:
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.5 + np.cos(phi) # polar coords
>>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
And interpolate:
>>> from scipy.interpolate import splprep, splev
>>> tck, u = splprep([x, y], s=0)
>>> new_points = splev(u, tck)
Notice that (i) we force interpolation by using `s=0`,
(ii) the parameterization, ``u``, is generated automatically.
Now plot the result:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, 'ro')
>>> ax.plot(new_points[0], new_points[1], 'r-')
>>> plt.show()
"""
res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
quiet)
return res
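def _chord_length_parameterization_sketch():
    # Hedged sketch, not part of scipy: reproduce the default parameter
    # values ``u`` from the chord-length formula quoted in the docstring
    # above and compare them with what ``splprep`` returns.
    phi = np.linspace(0, 2. * np.pi, 40)
    r = 0.5 + np.cos(phi)
    x, y = r * np.cos(phi), r * np.sin(phi)
    tck, u = splprep([x, y], s=0)
    v = np.zeros(len(x))
    v[1:] = np.cumsum(np.hypot(np.diff(x), np.diff(y)))
    return np.allclose(u, v / v[-1])  # expected: True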
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of a 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines.
Even values of k should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives. Uses the
FORTRAN routine ``curfit`` from FITPACK.
The user is responsible for assuring that the values of `x` are unique.
Otherwise, `splrep` will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
This routine zero-pads the coefficients array ``c`` to have the same length
as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
`splprep`, which does not zero-pad the coefficients.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> spl = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, spl)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
return res
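def _interior_knots_sketch():
    # Hedged sketch, not part of scipy: supplying interior knots ``t``
    # switches splrep to the weighted least-squares mode (task=-1). The
    # knots below satisfy the Schoenberg-Whitney conditions for this data.
    x = np.linspace(0, 10, 50)
    y = np.sin(x)
    spl = splrep(x, y, t=[2.5, 5.0, 7.5])
    return splev(5.0, spl)  # value of the least-squares fit at x = 5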
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : 3-tuple or a BSpline object
If a tuple, then it should be a sequence of length 3 returned by
`splrep` or `splprep` containing the knots, coefficients, and degree
of the spline. (Also see Notes.)
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k, the degree of the spline).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in `x`. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in an N-D space.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.__call__(x) instead.")
warnings.warn(mesg, DeprecationWarning)
# remap the out-of-bounds behavior
try:
extrapolate = {0: True, }[ext]
except KeyError:
raise ValueError("Extrapolation mode %s is not supported "
"by BSpline." % ext)
return tck(x, der, extrapolate=extrapolate)
else:
return _impl.splev(x, tck, der, ext)
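def _splev_extrapolation_sketch():
    # Hedged sketch, not part of scipy: the ``ext`` modes documented above,
    # evaluated at a point outside the fitted interval [0, 10].
    x = np.linspace(0, 10, 30)
    spl = splrep(x, np.sin(x))
    outside = 11.0
    extrapolated = splev(outside, spl, ext=0)  # extrapolate
    zero = splev(outside, spl, ext=1)          # return 0
    boundary = splev(outside, spl, ext=3)      # return the boundary value
    try:
        splev(outside, spl, ext=2)             # raises ValueError
    except ValueError:
        pass
    return extrapolated, zero, boundary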
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple or a BSpline instance
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
(Only returned if `full_output` is non-zero)
Notes
-----
`splint` silently assumes that the spline function is zero outside the data
interval (`a`, `b`).
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
BSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.integrate() instead.")
warnings.warn(mesg, DeprecationWarning)
if full_output != 0:
mesg = ("full_output = %s is not supported. Proceeding as if "
"full_output = 0" % full_output)
return tck.integrate(a, b, extrapolate=False)
else:
return _impl.splint(a, b, tck, full_output)
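def _splint_sketch():
    # Hedged sketch, not part of scipy: the integral of sin over [0, pi]
    # is 2, which splint recovers from an interpolating spline of samples.
    x = np.linspace(0, np.pi, 50)
    spl = splrep(x, np.sin(x))
    return splint(0, np.pi, spl)  # approximately 2.0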
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple or a BSpline object
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
    See Also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
"not recommended.")
warnings.warn(mesg, DeprecationWarning)
t, c, k = tck.tck
# _impl.sproot expects the interpolation axis to be last, so roll it.
# NB: This transpose is a no-op if c is 1D.
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
return _impl.sproot((t, c, k), mest)
else:
return _impl.sproot(tck, mest)
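def _sproot_sketch():
    # Hedged sketch, not part of scipy: roots of sin sampled on an interval
    # chosen so that all zeros are interior points of the fitted spline.
    x = np.linspace(0.5, 9.5, 70)
    spl = splrep(x, np.sin(x))
    return sproot(spl) / np.pi  # approximately [1., 2., 3.]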
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
BSpline
References
----------
.. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
raise TypeError("spalde does not accept BSpline instances.")
else:
return _impl.spalde(x, tck)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : a `BSpline` instance or a tuple
If tuple, then it is expected to be a tuple (t,c,k) containing
the vector of knots, the B-spline coefficients, and the degree of
the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
BSpline instance or a tuple
A new B-spline with knots t, coefficients c, and degree k.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
Based on algorithms from [1]_ and [2]_.
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
# FITPACK expects the interpolation axis to be last, so roll it over
# NB: if c array is 1D, transposes are no-ops
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
# and roll the last axis back
c_ = np.asarray(c_)
c_ = c_.transpose((sh[-1],) + sh[:-1])
return BSpline(t_, c_, k_)
else:
return _impl.insert(x, tck, m, per)
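def _insert_sketch():
    # Hedged sketch, not part of scipy: inserting a knot changes the
    # representation but not the curve, so evaluations agree before and
    # after the insertion.
    x = np.linspace(0, 10, 30)
    spl = splrep(x, np.sin(x))
    spl_refined = insert(4.2, spl)
    xs = np.linspace(0, 10, 200)
    return np.allclose(splev(xs, spl), splev(xs, spl_refined))  # True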
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
`BSpline` instance or tuple
Spline of order k2=k-n representing the derivative
of the input spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
BSpline
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if isinstance(tck, BSpline):
return tck.derivative(n)
else:
return _impl.splder(tck, n)
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
BSpline instance or a tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
See Also
--------
splder, splev, spalde
BSpline
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if isinstance(tck, BSpline):
return tck.antiderivative(n)
else:
return _impl.splantider(tck, n)
|
bsd-3-clause
|
jswanljung/iris
|
lib/iris/tests/unit/plot/test_outline.py
|
11
|
3161
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.outline` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.outline(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.outline(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.outline(self.cube, coords=('str_coord', 'bar'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.outline(self.cube, axes=ax, coords=('bar', 'str_coord'))
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.outline,
self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=True)
coord = self.cube.coord('foo')
self.foo = coord.contiguous_bounds()
self.foo_index = np.arange(coord.points.size + 1)
coord = self.cube.coord('bar')
self.bar = coord.contiguous_bounds()
self.bar_index = np.arange(coord.points.size + 1)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch('matplotlib.pyplot.pcolormesh')
self.draw_func = iplt.outline
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
RosesTheN00b/BudgetButlerWeb
|
butler_offline/core/DBManager.py
|
1
|
6017
|
'''
Read and write the database files using pandas
'''
from _io import StringIO
from butler_offline.core import file_system
from butler_offline.core.database import Database
import pandas as pd
KEYWORD_EINZELBUCHUNGEN = 'Einzelbuchungen'
KEYWORD_DAUERAUFRTAEGE = 'Dauerauftraege'
KEYWORD_GEMEINSAME_BUCHUNGEN = 'Gemeinsame Buchungen'
KEYWORD_SPARBUCHUNGEN = 'Sparbuchungen'
KEYWORD_SPARKONTOS = 'Sparkontos'
KEYWORD_DEPOTWERTE = 'Depotwerte'
KEYWORD_ORDER = 'Order'
KEYWORD_ORDERDAUERAUFTRAG = 'Dauerauftr_Ordr'
KEYWORD_DEPOTAUSZUEGE = 'Depotauszuege'
KEYWORD_LINEBREAK = '\n'
def _to_table(content):
return pd.read_csv(StringIO(content))
def read(nutzername, ausgeschlossene_kategorien):
if not file_system.instance().read(database_path_from(nutzername)):
neue_datenbank = Database(nutzername)
write(neue_datenbank)
file_content = file_system.instance().read(database_path_from(nutzername))
parser = DatabaseParser()
parser.from_string(file_content)
database = Database(nutzername, ausgeschlossene_kategorien=ausgeschlossene_kategorien)
database.einzelbuchungen.parse(_to_table(parser.einzelbuchungen()))
print('READER: Einzelbuchungen gelesen')
database.dauerauftraege.parse(_to_table(parser.dauerauftraege()))
print('READER: Daueraufträge gelesen')
database.gemeinsamebuchungen.parse(_to_table(parser.gemeinsame_buchungen()))
print('READER: Gemeinsame Buchungen gelesen')
if parser.sparkontos():
database.sparkontos.parse(_to_table(parser.sparkontos()))
print('READER: Sparkontos gelesen')
if parser.sparbuchungen():
database.sparbuchungen.parse(_to_table(parser.sparbuchungen()))
print('READER: Sparbuchungen gelesen')
if parser.depotwerte():
database.depotwerte.parse(_to_table(parser.depotwerte()))
print('READER: Depotwerte gelesen')
if parser.order():
database.order.parse(_to_table(parser.order()))
        print('READER: Order gelesen')
if parser.depotauszuege():
database.depotauszuege.parse(_to_table(parser.depotauszuege()))
print('READER: Depotauszuege gelesen')
if parser.order_dauerauftrag():
database.orderdauerauftrag.parse(_to_table(parser.order_dauerauftrag()))
print('READER: Order Dauerauftrag gelesen')
print('READER: Refreshe Database')
database.refresh()
print('READER: Refresh done')
return database
def wrap_tableheader(table_header_name):
return '{} {} {}'.format(KEYWORD_LINEBREAK, table_header_name, KEYWORD_LINEBREAK)
def write(database):
content = database.einzelbuchungen.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_DAUERAUFRTAEGE)
content += database.dauerauftraege.content.to_csv(index=False)
content += wrap_tableheader(KEYWORD_GEMEINSAME_BUCHUNGEN)
content += database.gemeinsamebuchungen.content.to_csv(index=False)
content += wrap_tableheader(KEYWORD_SPARBUCHUNGEN)
content += database.sparbuchungen.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_SPARKONTOS)
content += database.sparkontos.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_DEPOTWERTE)
content += database.depotwerte.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_ORDER)
content += database.order.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_ORDERDAUERAUFTRAG)
content += database.orderdauerauftrag.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_DEPOTAUSZUEGE)
content += database.depotauszuege.get_static_content().to_csv(index=False)
file_system.instance().write(database_path_from(database.name), content)
print("WRITER: All Saved")
def database_path_from(username):
return '../Database_' + username + '.csv'
class DatabaseParser:
def __init__(self):
self._reader = MultiPartCsvReader(
set([
KEYWORD_EINZELBUCHUNGEN,
KEYWORD_DAUERAUFRTAEGE,
KEYWORD_GEMEINSAME_BUCHUNGEN,
KEYWORD_SPARBUCHUNGEN,
KEYWORD_SPARKONTOS,
KEYWORD_DEPOTWERTE,
KEYWORD_ORDER,
KEYWORD_DEPOTAUSZUEGE,
KEYWORD_ORDERDAUERAUFTRAG
]),
start_token=KEYWORD_EINZELBUCHUNGEN)
def from_string(self, lines):
self._reader.from_string(lines)
def einzelbuchungen(self):
return self._reader.get_string(KEYWORD_EINZELBUCHUNGEN)
def dauerauftraege(self):
return self._reader.get_string(KEYWORD_DAUERAUFRTAEGE)
def gemeinsame_buchungen(self):
return self._reader.get_string(KEYWORD_GEMEINSAME_BUCHUNGEN)
def sparbuchungen(self):
return self._reader.get_string(KEYWORD_SPARBUCHUNGEN)
def sparkontos(self):
return self._reader.get_string(KEYWORD_SPARKONTOS)
def depotwerte(self):
return self._reader.get_string(KEYWORD_DEPOTWERTE)
def order(self):
return self._reader.get_string(KEYWORD_ORDER)
def depotauszuege(self):
return self._reader.get_string(KEYWORD_DEPOTAUSZUEGE)
def order_dauerauftrag(self):
return self._reader.get_string(KEYWORD_ORDERDAUERAUFTRAG)
class MultiPartCsvReader:
def __init__(self, token, start_token):
self._token = token
self._start_token = start_token
self._tables = {}
def from_string(self, lines):
self._tables = dict.fromkeys(self._token, '')
mode = self._start_token
for line in lines:
line = line.strip()
if line == '':
continue
if line in self._token:
mode = line
continue
            if ',' not in line:
break
self._tables[mode] = self._tables[mode] + KEYWORD_LINEBREAK + line
def get_string(self, token):
return self._tables[token].strip()
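# Illustrative sketch (hedged; not part of the original module) of the
# multi-part CSV layout that ``write`` produces and ``DatabaseParser``
# consumes. The section markers are the KEYWORD_* constants above; the
# column names shown here are hypothetical examples, not the real schema:
#
#   Datum,Kategorie,Name,Wert          <- Einzelbuchungen table (start token)
#   2024-01-01,Essen,Baecker,-3.50
#   <KEYWORD_DAUERAUFRTAEGE>           <- a line holding only the keyword
#   Startdatum,Endedatum,Rhythmus,Name,Wert
#   <KEYWORD_GEMEINSAME_BUCHUNGEN>
#   ... and so on for the remaining tables.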
|
agpl-3.0
|
dhermes/bezier
|
docs/make_images.py
|
1
|
59464
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to make images that are intended for docs.
To actually execute these functions with the desired inputs, run:
.. code-block:: console
$ nox -s docs_images
"""
import os
try:
from matplotlib import patches
from matplotlib import path as _path_mod
import matplotlib.pyplot as plt
except ImportError:
patches = None
_path_mod = None
plt = None
import numpy as np
try:
import seaborn
except ImportError:
seaborn = None
import bezier
from bezier import _geometric_intersection
from bezier import _helpers
from bezier import _plot_helpers
from bezier.hazmat import clipping
from bezier.hazmat import geometric_intersection as _py_geometric_intersection
BLUE = "blue"
GREEN = "green"
RED = "red"
if seaborn is not None:
seaborn.set() # Required in ``seaborn >= 0.8``
# As of ``0.9.0``, this palette has
# (BLUE, ORANGE, GREEN, RED, PURPLE, BROWN).
_COLORS = seaborn.color_palette(palette="deep", n_colors=6)
BLUE = _COLORS[0]
GREEN = _COLORS[2]
RED = _COLORS[3]
del _COLORS
_DOCS_DIR = os.path.abspath(os.path.dirname(__file__))
IMAGES_DIR = os.path.join(_DOCS_DIR, "images")
NO_IMAGES = "GENERATE_IMAGES" not in os.environ
def save_image(figure, filename):
"""Save an image to the docs images directory.
    Args:
        figure (matplotlib.figure.Figure): The figure to save.
        filename (str): The name of the file (not containing
            directory info).
"""
path = os.path.join(IMAGES_DIR, filename)
figure.savefig(path, bbox_inches="tight")
plt.close(figure)
def stack1d(*points):
    """Fill out the columns of a matrix with a series of points.
    This helper exists because ``np.hstack()`` would just make another 1D
    vector out of the points and ``np.vstack()`` would put them in the rows.
Args:
points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e.
            arrays with shape ``(2,)``).
Returns:
numpy.ndarray: The array with each point in ``points`` as its
columns.
"""
result = np.empty((2, len(points)), order="F")
for index, point in enumerate(points):
result[:, index] = point
return result
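# Hedged doctest-style sketch for ``stack1d`` above (not part of the original
# helpers): stack1d(np.array([0.0, 1.0]), np.array([2.0, 3.0])) returns
#     [[0.0, 2.0],
#      [1.0, 3.0]]
# i.e. each input point becomes one column of the Fortran-ordered result.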
def linearization_error(nodes):
"""Image for :func:`.linearization_error` docstring."""
if NO_IMAGES:
return
curve = bezier.Curve.from_nodes(nodes)
line = bezier.Curve.from_nodes(nodes[:, (0, -1)])
midpoints = np.hstack([curve.evaluate(0.5), line.evaluate(0.5)])
ax = curve.plot(256, color=BLUE)
line.plot(256, ax=ax, color=GREEN)
ax.plot(
midpoints[0, :], midpoints[1, :], color="black", linestyle="dashed"
)
ax.axis("scaled")
save_image(ax.figure, "linearization_error.png")
def newton_refine1(s, new_s, curve1, t, new_t, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
points = np.hstack([curve1.evaluate(s), curve2.evaluate(t)])
points_new = np.hstack([curve1.evaluate(new_s), curve2.evaluate(new_t)])
ax = curve1.plot(256, color=BLUE)
curve2.plot(256, ax=ax, color=GREEN)
ax.plot(
points[0, :],
points[1, :],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
ax.plot(
points_new[0, :],
points_new[1, :],
color="black",
linestyle="None",
marker="o",
)
ax.axis("scaled")
save_image(ax.figure, "newton_refine1.png")
def newton_refine2(s_vals, curve1, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256, color=BLUE)
ax.lines[-1].zorder = 1
curve2.plot(256, ax=ax, color=GREEN)
ax.lines[-1].zorder = 1
points = curve1.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 5)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
ax.axis("scaled")
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
save_image(ax.figure, "newton_refine2.png")
def newton_refine3(s_vals, curve1, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256, color=BLUE)
ax.lines[-1].zorder = 1
curve2.plot(256, ax=ax, color=GREEN)
ax.lines[-1].zorder = 1
points = curve1.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 6)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
ax.axis("scaled")
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 0.5625)
save_image(ax.figure, "newton_refine3.png")
def segment_intersection1(start0, end0, start1, end1, s):
"""Image for :func:`.segment_intersection` docstring."""
if NO_IMAGES:
return
line0 = bezier.Curve.from_nodes(stack1d(start0, end0))
line1 = bezier.Curve.from_nodes(stack1d(start1, end1))
ax = line0.plot(2, color=BLUE)
line1.plot(256, ax=ax, color=GREEN)
(x_val,), (y_val,) = line0.evaluate(s)
ax.plot([x_val], [y_val], color="black", marker="o")
ax.axis("scaled")
save_image(ax.figure, "segment_intersection1.png")
def segment_intersection2(start0, end0, start1, end1):
"""Image for :func:`.segment_intersection` docstring."""
if NO_IMAGES:
return
line0 = bezier.Curve.from_nodes(stack1d(start0, end0))
line1 = bezier.Curve.from_nodes(stack1d(start1, end1))
ax = line0.plot(2, color=BLUE)
line1.plot(2, ax=ax, color=GREEN)
ax.axis("scaled")
save_image(ax.figure, "segment_intersection2.png")
def helper_parallel_lines(start0, end0, start1, end1, filename):
"""Image for :func:`.parallel_lines_parameters` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
points = stack1d(start0, end0, start1, end1)
ax.plot(points[0, :2], points[1, :2], marker="o", color=BLUE)
ax.plot(points[0, 2:], points[1, 2:], marker="o", color=GREEN)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(figure, filename)
def add_patch(
ax, nodes, color, with_nodes=True, alpha=0.625, node_color="black"
):
# ``nodes`` is stored Fortran-contiguous with ``x-y`` points in each
# column but ``Path()`` wants ``x-y`` points in each row.
path = _path_mod.Path(nodes.T)
patch = patches.PathPatch(
path, edgecolor=color, facecolor=color, alpha=alpha
)
ax.add_patch(patch)
if with_nodes:
ax.plot(
nodes[0, :],
nodes[1, :],
color=node_color,
linestyle="None",
marker="o",
)
def curve_constructor(curve):
    """Image for :class:`.Curve` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
line = ax.lines[0]
nodes = curve._nodes
ax.plot(
nodes[0, :], nodes[1, :], color="black", linestyle="None", marker="o"
)
add_patch(ax, nodes, line.get_color())
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "curve_constructor.png")
def curve_evaluate(curve):
    """Image for :meth:`.Curve.evaluate` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
points = curve.evaluate_multi(np.asfortranarray([0.75]))
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "curve_evaluate.png")
def curve_evaluate_hodograph(curve, s):
    """Image for :meth:`.Curve.evaluate_hodograph` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
points = curve.evaluate_multi(np.asfortranarray([s]))
if points.shape != (2, 1):
raise ValueError("Unexpected shape", points)
point = points[:, 0]
tangents = curve.evaluate_hodograph(s)
if tangents.shape != (2, 1):
raise ValueError("Unexpected shape", tangents)
tangent = tangents[:, 0]
ax.plot(
[point[0] - 2 * tangent[0], point[0] + 2 * tangent[0]],
[point[1] - 2 * tangent[1], point[1] + 2 * tangent[1]],
color=BLUE,
alpha=0.5,
)
ax.plot(
[point[0], point[0] + tangent[0]],
[point[1], point[1] + tangent[1]],
color="black",
linestyle="dashed",
marker="o",
markersize=5,
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.75)
ax.set_ylim(-0.0625, 0.75)
save_image(ax.figure, "curve_evaluate_hodograph.png")
def curve_subdivide(curve, left, right):
    """Image for :meth:`.Curve.subdivide` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
add_patch(ax, curve._nodes, "gray")
ax = left.plot(256, ax=ax, color=BLUE)
line = ax.lines[-1]
add_patch(ax, left._nodes, line.get_color())
right.plot(256, ax=ax, color=GREEN)
line = ax.lines[-1]
add_patch(ax, right._nodes, line.get_color())
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 3.125)
save_image(ax.figure, "curve_subdivide.png")
def curve_intersect(curve1, curve2, s_vals):
    """Image for :meth:`.Curve.intersect` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256, color=BLUE)
curve2.plot(256, ax=ax, color=GREEN)
intersections = curve1.evaluate_multi(s_vals)
ax.plot(
intersections[0, :],
intersections[1, :],
color="black",
linestyle="None",
marker="o",
)
ax.axis("scaled")
ax.set_xlim(0.0, 0.75)
ax.set_ylim(0.0, 0.75)
save_image(ax.figure, "curve_intersect.png")
def triangle_constructor(triangle):
    """Image for :class:`.Triangle` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE, with_nodes=True)
line = ax.lines[0]
nodes = triangle._nodes
add_patch(ax, nodes[:, (0, 1, 2, 5)], line.get_color())
delta = 1.0 / 32.0
ax.text(
nodes[0, 0],
nodes[1, 0],
r"$v_0$",
fontsize=20,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
nodes[0, 1],
nodes[1, 1],
r"$v_1$",
fontsize=20,
verticalalignment="top",
horizontalalignment="center",
)
ax.text(
nodes[0, 2],
nodes[1, 2],
r"$v_2$",
fontsize=20,
verticalalignment="top",
horizontalalignment="left",
)
ax.text(
nodes[0, 3] - delta,
nodes[1, 3],
r"$v_3$",
fontsize=20,
verticalalignment="center",
horizontalalignment="right",
)
ax.text(
nodes[0, 4] + delta,
nodes[1, 4],
r"$v_4$",
fontsize=20,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
nodes[0, 5],
nodes[1, 5] + delta,
r"$v_5$",
fontsize=20,
verticalalignment="bottom",
horizontalalignment="center",
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "triangle_constructor.png")
def triangle_evaluate_barycentric(triangle, point):
    """Image for :meth:`.Triangle.evaluate_barycentric` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
point[0, :], point[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "triangle_evaluate_barycentric.png")
def triangle_evaluate_cartesian_multi(triangle, points):
    """Image for :meth:`.Triangle.evaluate_cartesian_multi` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0],
r"$w_0$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 1] + 2 * delta,
points[1, 1],
r"$w_1$",
fontsize=font_size,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
points[0, 2],
points[1, 2] + delta,
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.375)
ax.set_ylim(-0.25, 2.125)
save_image(ax.figure, "triangle_evaluate_cartesian_multi.png")
def triangle_evaluate_barycentric_multi(triangle, points):
    """Image for :meth:`.Triangle.evaluate_barycentric_multi` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0] + delta,
r"$w_0$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="center",
)
ax.text(
points[0, 1],
points[1, 1] - delta,
r"$w_1$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 2],
points[1, 2],
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.text(
points[0, 3],
points[1, 3],
r"$w_3$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.125)
ax.set_ylim(-0.3125, 2.125)
save_image(ax.figure, "triangle_evaluate_barycentric_multi.png")
def triangle_is_valid1(triangle):
    """Image for :meth:`.Triangle.is_valid` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 2.125)
save_image(ax.figure, "triangle_is_valid1.png")
def triangle_is_valid2(triangle):
    """Image for :meth:`.Triangle.is_valid` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.0625)
ax.set_ylim(-0.0625, 1.0625)
save_image(ax.figure, "triangle_is_valid2.png")
def triangle_is_valid3(triangle):
    """Image for :meth:`.Triangle.is_valid` docstring."""
if NO_IMAGES:
return
edge1, edge2, edge3 = triangle.edges
N = 128
# Compute points on each edge.
std_s = np.linspace(0.0, 1.0, N + 1)
points1 = edge1.evaluate_multi(std_s)
points2 = edge2.evaluate_multi(std_s)
points3 = edge3.evaluate_multi(std_s)
# Compute the actual boundary where the Jacobian is 0.
s_vals = np.linspace(0.0, 0.2, N)
t_discrim = np.sqrt((1.0 - s_vals) * (1.0 - 5.0 * s_vals))
t_top = 0.5 * (1.0 - s_vals + t_discrim)
t_bottom = 0.5 * (1.0 - s_vals - t_discrim)
jacobian_zero_params = np.zeros((2 * N - 1, 2), order="F")
jacobian_zero_params[:N, 0] = s_vals
jacobian_zero_params[:N, 1] = t_top
jacobian_zero_params[N:, 0] = s_vals[-2::-1]
jacobian_zero_params[N:, 1] = t_bottom[-2::-1]
jac_edge = triangle.evaluate_cartesian_multi(jacobian_zero_params)
# Add the triangle to the plot and add a dashed line
# for each "true" edge.
figure = plt.figure()
ax = figure.gca()
(line,) = ax.plot(jac_edge[0, :], jac_edge[1, :], color=BLUE)
color = line.get_color()
ax.plot(points1[0, :], points1[1, :], color="black", linestyle="dashed")
ax.plot(points2[0, :], points2[1, :], color="black", linestyle="dashed")
ax.plot(points3[0, :], points3[1, :], color="black", linestyle="dashed")
polygon = np.hstack([points1[:, 1:], points2[:, 1:], jac_edge[:, 1:]])
add_patch(ax, polygon, color, with_nodes=False)
ax.axis("scaled")
ax.set_xlim(-0.0625, 1.0625)
ax.set_ylim(-0.0625, 1.0625)
save_image(ax.figure, "triangle_is_valid3.png")
def triangle_subdivide1():
    """Image for :meth:`.Triangle.subdivide` docstring."""
if NO_IMAGES:
return
triangle = bezier.Triangle.from_nodes(
np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
)
triangle_a, triangle_b, triangle_c, triangle_d = triangle.subdivide()
figure, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
for ax in (ax1, ax2, ax3, ax4):
triangle.plot(2, ax=ax, color=BLUE)
triangle_a.plot(2, ax=ax1, color=GREEN)
ax1.text(
1.0 / 6.0,
1.0 / 6.0,
r"$A$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
triangle_b.plot(2, ax=ax2, color=GREEN)
ax2.text(
1.0 / 3.0,
1.0 / 3.0,
r"$B$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
triangle_c.plot(2, ax=ax3, color=GREEN)
ax3.text(
2.0 / 3.0,
1.0 / 6.0,
r"$C$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
triangle_d.plot(2, ax=ax4, color=GREEN)
ax4.text(
1.0 / 6.0,
2.0 / 3.0,
r"$D$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
for ax in (ax1, ax2, ax3, ax4):
ax.axis("scaled")
    save_image(figure, "triangle_subdivide1.png")
def add_edges(ax, triangle, s_vals, color):
edge1, edge2, edge3 = triangle.edges
# Compute points on each edge.
points1 = edge1.evaluate_multi(s_vals)
points2 = edge2.evaluate_multi(s_vals)
points3 = edge3.evaluate_multi(s_vals)
# Add the points to the plot.
ax.plot(points1[0, :], points1[1, :], color=color)
ax.plot(points2[0, :], points2[1, :], color=color)
ax.plot(points3[0, :], points3[1, :], color=color)
def triangle_subdivide2(triangle, sub_triangle_b):
    """Image for :meth:`.Triangle.subdivide` docstring."""
if NO_IMAGES:
return
# Plot set-up.
figure = plt.figure()
ax = figure.gca()
colors = seaborn.husl_palette(6)
N = 128
s_vals = np.linspace(0.0, 1.0, N + 1)
# Add edges from triangle.
add_edges(ax, triangle, s_vals, colors[4])
# Now do the same for triangle B.
add_edges(ax, sub_triangle_b, s_vals, colors[0])
# Add the control points polygon for the original triangle.
nodes = triangle._nodes[:, (0, 2, 4, 5, 0)]
add_patch(ax, nodes, colors[2], with_nodes=False)
# Add the control points polygon for the sub-triangle.
nodes = sub_triangle_b._nodes[:, (0, 1, 2, 5, 3, 0)]
add_patch(ax, nodes, colors[1], with_nodes=False)
# Plot **all** the nodes.
sub_nodes = sub_triangle_b._nodes
ax.plot(
sub_nodes[0, :],
sub_nodes[1, :],
color="black",
linestyle="None",
marker="o",
)
# Take those same points and add the boundary.
ax.plot(nodes[0, :], nodes[1, :], color="black", linestyle="dashed")
ax.axis("scaled")
ax.set_xlim(-1.125, 2.125)
ax.set_ylim(-0.125, 4.125)
    save_image(ax.figure, "triangle_subdivide2.png")
def curved_polygon_constructor1(curved_poly):
    """Image for :class:`.CurvedPolygon` docstring."""
if NO_IMAGES:
return
ax = curved_poly.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.625, 1.625)
save_image(ax.figure, "curved_polygon_constructor1.png")
def curved_polygon_constructor2(curved_poly):
    """Image for :class:`.CurvedPolygon` docstring."""
if NO_IMAGES:
return
ax = curved_poly.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "curved_polygon_constructor2.png")
def triangle_locate(triangle, point):
    """Image for :meth:`.Triangle.locate` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
point[0, :], point[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.0625, 1.0625)
ax.set_ylim(-0.1875, 1.0625)
save_image(ax.figure, "triangle_locate.png")
def curve_specialize(curve, new_curve):
    """Image for :meth:`.Curve.specialize` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
interval = r"$\left[0, 1\right]$"
line = ax.lines[-1]
line.set_label(interval)
color1 = line.get_color()
new_curve.plot(256, ax=ax, color=GREEN)
interval = r"$\left[-\frac{1}{4}, \frac{3}{4}\right]$"
line = ax.lines[-1]
line.set_label(interval)
ax.plot(
curve._nodes[0, (0, -1)],
curve._nodes[1, (0, -1)],
color=color1,
linestyle="None",
marker="o",
)
ax.plot(
new_curve._nodes[0, (0, -1)],
new_curve._nodes[1, (0, -1)],
color=line.get_color(),
linestyle="None",
marker="o",
)
ax.legend(loc="lower right", fontsize=12)
ax.axis("scaled")
ax.set_xlim(-0.375, 1.125)
ax.set_ylim(-0.75, 0.625)
save_image(ax.figure, "curve_specialize.png")
def newton_refine_triangle(triangle, x_val, y_val, s, t, new_s, new_t):
"""Image for :func:`.hazmat.triangle_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2)
# Plot features of the parameter space in ax1.
linear_triangle = bezier.Triangle.from_nodes(
np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
)
linear_triangle.plot(2, ax=ax1, color=BLUE)
ax1.plot([0.25], [0.5], marker="H", color=GREEN)
ax1.plot([s], [t], color="black", linestyle="None", marker="o")
ax1.plot(
[new_s],
[new_t],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Plot the equivalent output in ax2.
triangle.plot(256, ax=ax2, color=BLUE)
points = triangle.evaluate_cartesian_multi(
np.asfortranarray([[s, t], [new_s, new_t]])
)
ax2.plot([x_val], [y_val], marker="H", color=GREEN)
ax2.plot(
points[0, [0]],
points[1, [0]],
color="black",
linestyle="None",
marker="o",
)
ax2.plot(
points[0, [1]],
points[1, [1]],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Set the axis bounds / scaling.
ax1.axis("scaled")
ax1.set_xlim(-0.0625, 1.0625)
ax1.set_ylim(-0.0625, 1.0625)
ax2.axis("scaled")
ax2.set_xlim(-0.125, 2.125)
ax2.set_ylim(-0.125, 2.125)
save_image(figure, "newton_refine_triangle.png")
def classify_help(s, curve1, triangle1, curve2, triangle2, interior, ax=None):
assert triangle1.is_valid
edge1, _, _ = triangle1.edges
assert np.all(edge1._nodes == curve1._nodes)
assert triangle2.is_valid
edge2, _, _ = triangle2.edges
assert np.all(edge2._nodes == curve2._nodes)
ax = triangle1.plot(256, ax=ax, color=BLUE)
# Manually reduce the alpha on the triangle patch(es).
ax.patches[-1].set_alpha(0.1875)
color1 = ax.lines[-1].get_color()
triangle2.plot(256, ax=ax, color=GREEN)
ax.patches[-1].set_alpha(0.1875)
color2 = ax.lines[-1].get_color()
# Remove the existing boundary (lines) and just add our edges.
while ax.lines:
ax.lines[-1].remove()
edge1.plot(256, ax=ax, color=color1)
edge2.plot(256, ax=ax, color=color2)
(int_x,), (int_y,) = curve1.evaluate(s)
if interior == 0:
color = color1
elif interior == 1:
color = color2
else:
color = RED
ax.plot([int_x], [int_y], color=color, linestyle="None", marker="o")
ax.axis("scaled")
return ax
def classify_intersection1(s, curve1, tangent1, curve2, tangent2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[1.0, 1.75, 2.0, 1.0, 1.5, 1.0], [0.0, 0.25, 1.0, 1.0, 1.5, 2.0]]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[0.0, 1.6875, 2.0, 0.25, 1.25, 0.5],
[0.0, 0.0625, 0.5, 1.0, 1.25, 2.0],
]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, 0)
(int_x,), (int_y,) = curve1.evaluate(s)
# Remove the alpha from the color
color1 = ax.patches[0].get_facecolor()[:3]
color2 = ax.patches[1].get_facecolor()[:3]
ax.plot(
[int_x, int_x + tangent1[0, 0]],
[int_y, int_y + tangent1[1, 0]],
color=color1,
linestyle="dashed",
)
ax.plot(
[int_x, int_x + tangent2[0, 0]],
[int_y, int_y + tangent2[1, 0]],
color=color2,
linestyle="dashed",
)
ax.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "classify_intersection1.png")
def classify_intersection2(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[1.0, 1.5, 2.0, 1.25, 1.75, 1.5], [0.0, 1.0, 0.0, 1.0, 1.0, 2.0]]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[0.0, 1.5, 3.0, 0.75, 2.25, 1.5], [0.0, 1.0, 0.0, 2.0, 2.0, 4.0]]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, 1)
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "classify_intersection2.png")
def classify_intersection3(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[2.0, 1.5, 1.0, 1.75, 1.25, 1.5],
[0.0, 1.0, 0.0, -1.0, -1.0, -2.0],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[3.0, 1.5, 0.0, 2.25, 0.75, 1.5],
[0.0, 1.0, 0.0, -2.0, -2.0, -4.0],
]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, 0)
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "classify_intersection3.png")
def classify_intersection4(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[2.0, 1.5, 1.0, 1.75, 1.25, 1.5],
[0.0, 1.0, 0.0, -1.0, -1.0, -2.0],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[0.0, 1.5, 3.0, 0.75, 2.25, 1.5], [0.0, 1.0, 0.0, 2.0, 2.0, 4.0]]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, None)
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "classify_intersection4.png")
def classify_intersection5(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[1.0, 1.5, 2.0, 1.25, 1.75, 1.5],
[0.0, 1.0, 0.0, 0.9375, 0.9375, 1.875],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[3.0, 1.5, 0.0, 2.25, 0.75, 1.5],
[0.0, 1.0, 0.0, -2.0, -2.0, -4.0],
]
)
)
figure, (ax1, ax2) = plt.subplots(2, 1)
classify_help(s, curve1, triangle1, curve2, triangle2, 0, ax=ax1)
classify_help(s, curve1, triangle1, curve2, triangle2, 1, ax=ax2)
# Remove the alpha from the color
color1 = ax1.patches[0].get_facecolor()[:3]
color2 = ax1.patches[1].get_facecolor()[:3]
# Now add the "degenerate" intersection polygons. The first
# comes from specializing to
# left1(0.5, 1.0)-left2(0.0, 0.25)-right1(0.375, 0.5)
triangle3 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[1.5, 1.75, 2.0, 1.6875, 1.9375, 1.875],
[0.5, 0.5, 0.0, 0.5, 0.234375, 0.46875],
]
)
)
# NOTE: We don't require the intersection polygon be valid.
triangle3.plot(256, ax=ax1, color=RED)
# The second comes from specializing to
# left1(0.0, 0.5)-right1(0.5, 0.625)-left3(0.75, 1.0)
triangle4 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[1.0, 1.25, 1.5, 1.0625, 1.3125, 1.125],
[0.0, 0.5, 0.5, 0.234375, 0.5, 0.46875],
]
)
)
# NOTE: We don't require the intersection polygon be valid.
triangle4.plot(256, ax=ax2, color=RED)
(int_x,), (int_y,) = curve1.evaluate(s)
ax1.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax2.plot([int_x], [int_y], color=color2, linestyle="None", marker="o")
for ax in (ax1, ax2):
ax.axis("scaled")
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
plt.setp(ax1.get_xticklabels(), visible=False)
figure.tight_layout(h_pad=-7.0)
save_image(figure, "classify_intersection5.png")
def classify_intersection6(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[-0.125, -0.125, 0.375, -0.0625, 0.1875, 0.0],
[0.0625, -0.0625, 0.0625, 0.15625, 0.15625, 0.25],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[-0.25, -0.25, 0.75, 0.125, 0.625, 0.5],
[0.25, -0.25, 0.25, 0.625, 0.625, 1.0],
]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, None)
ax.set_xlim(-0.3125, 1.0625)
ax.set_ylim(-0.0625, 0.3125)
save_image(ax.figure, "classify_intersection6.png")
def classify_intersection7(s, curve1a, curve1b, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[0.0, 4.5, 9.0, 0.0, 4.5, 0.0],
[0.0, 0.0, 2.25, 1.25, 2.375, 2.5],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[11.25, 9.0, 2.75, 8.125, 3.875, 5.0],
[0.0, 4.5, 1.0, -0.75, -0.25, -1.5],
]
)
)
figure, (ax1, ax2) = plt.subplots(2, 1)
classify_help(s, curve1a, triangle1, curve2, triangle2, None, ax=ax1)
triangle1._nodes = np.asfortranarray(
triangle1._nodes[:, (2, 4, 5, 1, 3, 0)]
)
triangle1._edges = None
classify_help(0.0, curve1b, triangle1, curve2, triangle2, 0, ax=ax2)
for ax in (ax1, ax2):
ax.set_xlim(-0.125, 11.5)
ax.set_ylim(-0.125, 2.625)
plt.setp(ax1.get_xticklabels(), visible=False)
figure.tight_layout(h_pad=-5.0)
save_image(figure, "classify_intersection7.png")
def get_curvature(nodes, s, tangent_vec, curvature):
"""Image for :func:`get_curvature` docstring."""
if NO_IMAGES:
return
curve = bezier.Curve.from_nodes(nodes)
# Find the center of the circle along the direction
# perpendicular to the tangent vector (90 degree left turn).
radius_dir = np.asfortranarray([[-tangent_vec[1, 0]], [tangent_vec[0, 0]]])
radius_dir /= np.linalg.norm(radius_dir, ord=2)
point = curve.evaluate(s)
circle_center = point + radius_dir / curvature
# Add the curve.
ax = curve.plot(256, color=BLUE)
# Add the circle.
circle_center = circle_center.ravel(order="F")
circle = plt.Circle(circle_center, 1.0 / abs(curvature), alpha=0.25)
ax.add_artist(circle)
# Add the point.
ax.plot(
point[0, :], point[1, :], color="black", marker="o", linestyle="None"
)
ax.axis("scaled")
ax.set_xlim(-0.0625, 1.0625)
ax.set_ylim(-0.0625, 0.625)
save_image(ax.figure, "get_curvature.png")
def curve_locate(curve, point1, point2, point3):
    """Image for :meth:`.Curve.locate` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
points = np.hstack([point1, point2, point3])
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.8125, 0.0625)
ax.set_ylim(0.75, 2.0625)
save_image(ax.figure, "curve_locate.png")
def newton_refine_curve(curve, point, s, new_s):
"""Image for :func:`.hazmat.curve_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
ax.plot(point[0, :], point[1, :], marker="H", color=GREEN)
wrong_points = curve.evaluate_multi(np.asfortranarray([s, new_s]))
ax.plot(
wrong_points[0, [0]],
wrong_points[1, [0]],
color="black",
linestyle="None",
marker="o",
)
ax.plot(
wrong_points[0, [1]],
wrong_points[1, [1]],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Set the axis bounds / scaling.
ax.axis("scaled")
ax.set_xlim(-0.125, 3.125)
ax.set_ylim(-0.125, 1.375)
save_image(ax.figure, "newton_refine_curve.png")
def newton_refine_curve_cusp(curve, s_vals):
"""Image for :func:`.hazmat.curve_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
ax.lines[-1].zorder = 1
points = curve.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 6)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
# Set the axis bounds / scaling.
ax.axis("scaled")
ax.set_xlim(-0.125, 6.125)
ax.set_ylim(-3.125, 3.125)
save_image(ax.figure, "newton_refine_curve_cusp.png")
def classify_intersection8(s, curve1, triangle1, curve2, triangle2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
ax = classify_help(s, curve1, triangle1, curve2, triangle2, None)
ax.set_xlim(-1.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "classify_intersection8.png")
def _edges_classify_intersection9():
"""The edges for the curved polygon intersection used below.
Helper for :func:`classify_intersection9`.
"""
edges1 = (
bezier.Curve.from_nodes(
np.asfortranarray([[32.0, 30.0], [20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 25.0, 20.0], [25.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 25.0, 30.0], [20.0, 20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 32.0], [15.0, 20.0]])
),
)
edges2 = (
bezier.Curve.from_nodes(
np.asfortranarray([[8.0, 10.0], [20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 15.0, 20.0], [15.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 15.0, 10.0], [20.0, 20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 8.0], [25.0, 20.0]])
),
)
return edges1, edges2
def classify_intersection9(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[0.0, 20.0, 40.0, 10.0, 30.0, 20.0],
[0.0, 40.0, 0.0, 25.0, 25.0, 50.0],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[40.0, 20.0, 0.0, 30.0, 10.0, 20.0],
[40.0, 0.0, 40.0, 15.0, 15.0, -10.0],
]
)
)
figure, (ax1, ax2) = plt.subplots(1, 2)
classify_help(s, curve1, triangle1, curve2, triangle2, 0, ax=ax1)
classify_help(s, curve1, triangle1, curve2, triangle2, 1, ax=ax2)
# Remove the alpha from the color
color1 = ax1.patches[0].get_facecolor()[:3]
color2 = ax1.patches[1].get_facecolor()[:3]
# Now add the "degenerate" intersection polygons.
cp_edges1, cp_edges2 = _edges_classify_intersection9()
curved_polygon1 = bezier.CurvedPolygon(*cp_edges1)
curved_polygon1.plot(256, ax=ax1, color=RED)
curved_polygon2 = bezier.CurvedPolygon(*cp_edges2)
curved_polygon2.plot(256, ax=ax2, color=RED)
(int_x,), (int_y,) = curve1.evaluate(s)
ax1.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax2.plot([int_x], [int_y], color=color2, linestyle="None", marker="o")
for ax in (ax1, ax2):
ax.axis("scaled")
ax.set_xlim(-2.0, 42.0)
ax.set_ylim(-12.0, 52.0)
plt.setp(ax2.get_yticklabels(), visible=False)
figure.tight_layout(w_pad=1.0)
save_image(figure, "classify_intersection9.png")
def curve_elevate(curve, elevated):
"""Image for :meth:`.curve.Curve.elevate` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2)
curve.plot(256, ax=ax1, color=BLUE)
color = ax1.lines[-1].get_color()
add_patch(ax1, curve._nodes, color)
elevated.plot(256, ax=ax2, color=BLUE)
color = ax2.lines[-1].get_color()
add_patch(ax2, elevated._nodes, color)
ax1.axis("scaled")
ax2.axis("scaled")
_plot_helpers.add_plot_boundary(ax1)
ax2.set_xlim(*ax1.get_xlim())
ax2.set_ylim(*ax1.get_ylim())
save_image(figure, "curve_elevate.png")
def triangle_elevate(triangle, elevated):
"""Image for :meth:`.triangle.Triangle.elevate` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2)
triangle.plot(256, ax=ax1, color=BLUE)
color = ax1.lines[-1].get_color()
nodes = triangle._nodes[:, (0, 1, 2, 4, 5)]
add_patch(ax1, nodes, color)
elevated.plot(256, ax=ax2, color=BLUE)
color = ax2.lines[-1].get_color()
nodes = elevated._nodes[:, (0, 1, 2, 3, 6, 8, 9)]
add_patch(ax2, nodes, color)
ax1.axis("scaled")
ax2.axis("scaled")
_plot_helpers.add_plot_boundary(ax1)
ax2.set_xlim(*ax1.get_xlim())
ax2.set_ylim(*ax1.get_ylim())
save_image(figure, "triangle_elevate.png")
def unit_triangle():
"""Image for :class:`.triangle.Triangle` docstring."""
if NO_IMAGES:
return
nodes = np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
triangle = bezier.Triangle(nodes, degree=1)
ax = triangle.plot(256, color=BLUE)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(ax.figure, "unit_triangle.png")
def curve_reduce(curve, reduced):
"""Image for :meth:`.curve.Curve.reduce` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
curve.plot(256, ax=ax1, color=BLUE)
color = ax1.lines[-1].get_color()
add_patch(ax1, curve._nodes, color)
reduced.plot(256, ax=ax2, color=BLUE)
color = ax2.lines[-1].get_color()
add_patch(ax2, reduced._nodes, color)
ax1.axis("scaled")
ax2.axis("scaled")
_plot_helpers.add_plot_boundary(ax2)
save_image(figure, "curve_reduce.png")
def curve_reduce_approx(curve, reduced):
"""Image for :meth:`.curve.Curve.reduce` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
color = ax.lines[-1].get_color()
add_patch(ax, curve._nodes, color, alpha=0.25, node_color=color)
reduced.plot(256, ax=ax, color=GREEN)
color = ax.lines[-1].get_color()
add_patch(ax, reduced._nodes, color, alpha=0.25, node_color=color)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(ax.figure, "curve_reduce_approx.png")
def simple_axis(ax):
ax.axis("scaled")
ax.set_xticklabels([])
ax.set_yticklabels([])
def plot_with_bbox(curve, ax, color, with_nodes=False):
curve.plot(256, color=color, ax=ax)
left, right, bottom, top = _helpers.bbox(curve._nodes)
bbox_nodes = np.asfortranarray(
[[left, right, right, left], [bottom, bottom, top, top]]
)
add_patch(ax, bbox_nodes, color, with_nodes=False)
if with_nodes:
ax.plot(
curve._nodes[0, :],
curve._nodes[1, :],
color=color,
linestyle="None",
marker="o",
markersize=4,
)
def plot_with_convex_hull(curve, ax, color, with_nodes=False):
curve.plot(256, color=color, ax=ax)
convex_hull = _helpers.simple_convex_hull(curve._nodes)
add_patch(ax, convex_hull, color, with_nodes=False)
if with_nodes:
ax.plot(
curve._nodes[0, :],
curve._nodes[1, :],
color=color,
linestyle="None",
marker="o",
markersize=4,
)
def _curve_boundary_predicate(filename, curve_boundary_plot, headers):
header1, header2, header3 = headers
figure, (ax1, ax2, ax3) = plt.subplots(1, 3)
control_pts1a = np.asfortranarray([[0.0, 0.375, 1.0], [0.0, 0.5, 0.125]])
curve1a = bezier.Curve(control_pts1a, degree=2)
control_pts1b = np.asfortranarray(
[[0.25, -0.125, 0.5], [-0.125, 0.375, 1.0]]
)
curve1b = bezier.Curve(control_pts1b, degree=2)
curve_boundary_plot(curve1a, ax1, BLUE)
curve_boundary_plot(curve1b, ax1, GREEN)
control_pts2a = np.asfortranarray([[0.0, 0.75, 1.0], [1.0, 0.75, 0.0]])
curve2a = bezier.Curve(control_pts2a, degree=2)
control_pts2b = np.asfortranarray(
[[0.625, 0.875, 1.625], [1.625, 0.875, 0.625]]
)
curve2b = bezier.Curve(control_pts2b, degree=2)
curve_boundary_plot(curve2a, ax2, BLUE)
curve_boundary_plot(curve2b, ax2, GREEN)
control_pts3a = np.asfortranarray([[0.0, 0.25, 1.0], [-0.25, 0.25, -0.75]])
curve3a = bezier.Curve(control_pts3a, degree=2)
control_pts3b = np.asfortranarray([[1.0, 1.5, 2.0], [-1.0, -1.5, -1.0]])
curve3b = bezier.Curve(control_pts3b, degree=2)
curve_boundary_plot(curve3a, ax3, BLUE)
curve_boundary_plot(curve3b, ax3, GREEN)
for ax in (ax1, ax2, ax3):
simple_axis(ax)
text_size = 10
ax1.set_xlim(-0.2, 1.1)
ax1.set_ylim(-0.2, 1.1)
ax1.set_title(header1, fontsize=text_size)
ax2.set_xlim(-0.1, 1.75)
ax2.set_ylim(-0.1, 1.75)
ax2.set_title(header2, fontsize=text_size)
ax3.set_xlim(-0.1, 2.1)
ax3.set_ylim(-1.7, 0.5)
ax3.set_title(header3, fontsize=text_size)
figure.set_size_inches(6.0, 2.2)
figure.subplots_adjust(
left=0.01, bottom=0.01, right=0.99, top=0.9, wspace=0.04, hspace=0.2
)
save_image(figure, filename)
def bounding_box_predicate():
headers = ("MAYBE", "MAYBE", "NO")
_curve_boundary_predicate(
"bounding_box_predicate.png", plot_with_bbox, headers
)
def convex_hull_predicate():
headers = ("MAYBE", "NO", "NO")
_curve_boundary_predicate(
"convex_hull_predicate.png", plot_with_convex_hull, headers
)
def subdivide_curve():
figure, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True)
nodes = np.asfortranarray([[0.0, 1.0, 2.0, 4.0], [0.0, 4.0, 0.0, 3.0]])
curve = bezier.Curve.from_nodes(nodes)
left, right = curve.subdivide()
curve.plot(256, ax=ax1, alpha=0.25, color="black")
left.plot(256, ax=ax1)
curve.plot(256, ax=ax2)
curve.plot(256, ax=ax3, alpha=0.25, color="black")
right.plot(256, ax=ax3)
text_size = 10
ax1.text(
2.5,
0.25,
r"$\left[0, \frac{1}{2}\right]$",
horizontalalignment="center",
verticalalignment="center",
fontsize=text_size,
)
ax2.text(
2.5,
0.25,
r"$\left[0, 1\right]$",
horizontalalignment="center",
verticalalignment="center",
fontsize=text_size,
)
ax3.text(
2.5,
0.25,
r"$\left[\frac{1}{2}, 1\right]$",
horizontalalignment="center",
verticalalignment="center",
fontsize=text_size,
)
for ax in (ax1, ax2, ax3):
simple_axis(ax)
figure.set_size_inches(6.0, 1.5)
figure.subplots_adjust(
left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.2
)
save_image(figure, "subdivide_curve.png")
def bbox_intersect(curve1, curve2):
enum_val = _geometric_intersection.bbox_intersect(
curve1.nodes, curve2.nodes
)
return enum_val != _py_geometric_intersection.BoxIntersectionType.DISJOINT
def refine_candidates(left, right):
new_left = []
for curve in left:
new_left.extend(curve.subdivide())
new_right = []
for curve in right:
new_right.extend(curve.subdivide())
keep_left = []
keep_right = []
for curve1 in new_left:
for curve2 in new_right:
if bbox_intersect(curve1, curve2):
keep_left.append(curve1)
if curve2 not in keep_right:
keep_right.append(curve2)
return keep_left, keep_right
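# Hedged usage sketch (mirrors ``subdivision_process`` below): each call to
# ``refine_candidates`` subdivides every surviving candidate and keeps only
# the halves whose bounding boxes still overlap, e.g.
#     left, right = refine_candidates([curve15], [curve25])
#     left, right = refine_candidates(left, right)  # repeat as needed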
def unique_curves(pairs):
left_tuples = set()
right_tuples = set()
left_curves = []
right_curves = []
for left, right in pairs:
as_tuple = tuple(left._nodes.flatten(order="F"))
if as_tuple not in left_tuples:
left_tuples.add(as_tuple)
left_curves.append(left)
as_tuple = tuple(right._nodes.flatten(order="F"))
if as_tuple not in right_tuples:
right_tuples.add(as_tuple)
right_curves.append(right)
return left_curves, right_curves
def subdivision_process():
nodes15 = np.asfortranarray([[0.25, 0.625, 1.0], [0.625, 0.25, 1.0]])
curve15 = bezier.Curve(nodes15, degree=2)
nodes25 = np.asfortranarray([[0.0, 0.25, 0.75, 1.0], [0.5, 1.0, 1.5, 0.5]])
curve25 = bezier.Curve(nodes25, degree=3)
figure, all_axes = plt.subplots(2, 3, sharex=True, sharey=True)
ax1, ax2, ax3, ax4, ax5, ax6 = all_axes.flatten()
color1 = BLUE
color2 = GREEN
plot_with_bbox(curve15, ax1, color1)
plot_with_bbox(curve25, ax1, color2)
left, right = refine_candidates([curve15], [curve25])
for curve in left:
plot_with_bbox(curve, ax2, color1)
for curve in right:
plot_with_bbox(curve, ax2, color2)
for ax in (ax3, ax4, ax5, ax6):
left, right = refine_candidates(left, right)
curve15.plot(256, color=color1, alpha=0.5, ax=ax)
for curve in left:
plot_with_bbox(curve, ax, color=color1)
curve25.plot(256, color=color2, alpha=0.5, ax=ax)
for curve in right:
plot_with_bbox(curve, ax, color2)
for ax in (ax1, ax2, ax3, ax4, ax5, ax6):
simple_axis(ax)
ax.set_xlim(-0.05, 1.05)
ax.set_ylim(0.4, 1.15)
figure.set_size_inches(6.0, 2.8)
figure.subplots_adjust(
left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.04
)
save_image(figure, "subdivision_process.png")
def _subdivision_pruning_zoom(all_axes, column):
half_width = 0.5 ** (column + 2)
min_x = 0.75 - half_width
max_x = 0.75 + half_width
min_y = 0.25 - half_width
max_y = 0.25 + half_width
for row in (0, 1, 2):
all_axes[row, column].plot(
[min_x, max_x, max_x, min_x, min_x],
[min_y, min_y, max_y, max_y, min_y],
color="black",
)
buffer = 0.5 ** (column + 6)
for row in (1, 2):
all_axes[row, column].set_xlim(min_x - buffer, max_x + buffer)
all_axes[row, column].set_ylim(min_y - buffer, max_y + buffer)
def subdivision_pruning():
figure, all_axes = plt.subplots(3, 4)
nodes69 = np.asfortranarray([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])
curve69 = bezier.Curve(nodes69, degree=2)
delta = np.asfortranarray([[1.0, 1.0, -1.0], [-1.0, -1.0, 1.0]]) / 32.0
nodes_other = nodes69 + delta
curve_other = bezier.Curve(nodes_other, degree=2)
color1 = BLUE
color2 = GREEN
for ax in all_axes.flatten():
curve69.plot(256, color=color1, ax=ax)
curve_other.plot(256, color=color2, ax=ax)
candidates = {0: [(curve69, curve_other)]}
intersections = []
for i in range(5):
candidates[i + 1] = _py_geometric_intersection.intersect_one_round(
candidates[i], intersections
)
for column in (0, 1, 2, 3):
left_curves, right_curves = unique_curves(candidates[column + 2])
for curve in left_curves:
plot_with_bbox(curve, all_axes[0, column], color1)
plot_with_bbox(curve, all_axes[1, column], color1, with_nodes=True)
plot_with_convex_hull(
curve, all_axes[2, column], color1, with_nodes=True
)
for curve in right_curves:
plot_with_bbox(curve, all_axes[0, column], color2)
plot_with_bbox(curve, all_axes[1, column], color2, with_nodes=True)
plot_with_convex_hull(
curve, all_axes[2, column], color2, with_nodes=True
)
for ax in all_axes.flatten():
simple_axis(ax)
_subdivision_pruning_zoom(all_axes, 0)
_subdivision_pruning_zoom(all_axes, 1)
_subdivision_pruning_zoom(all_axes, 2)
_subdivision_pruning_zoom(all_axes, 3)
intersection_params = curve69.intersect(curve_other)
s_vals = intersection_params[0, :]
intersections = curve69.evaluate_multi(s_vals)
for column in (0, 1, 2, 3):
all_axes[0, column].plot(
intersections[0, :],
intersections[1, :],
color="black",
linestyle="None",
marker="o",
markersize=4,
)
save_image(figure, "subdivision_pruning.png")
def _plot_endpoints_line(ax, fat_line_coeffs, **plot_kwargs):
# NOTE: This assumes the x-limits have already been set for the axis.
coeff_a, coeff_b, coeff_c, _, _ = fat_line_coeffs
if coeff_b == 0.0:
raise NotImplementedError("Vertical lines not supported")
min_x, max_x = ax.get_xlim()
# ax + by + c = 0 ==> y = -(ax + c)/b
min_y = -(coeff_a * min_x + coeff_c) / coeff_b
max_y = -(coeff_a * max_x + coeff_c) / coeff_b
ax.plot(
[min_x, max_x],
[min_y, max_y],
**plot_kwargs,
)
ax.set_xlim(min_x, max_x)
def _normalize_implicit_line_tuple(info):
length = np.linalg.norm(info[:2], ord=2)
return tuple(np.array(info) / length)
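# A note on the normalization above: dividing by the norm of ``(a, b)``
# rescales the implicit line so that a^2 + b^2 == 1, which makes
# ``a*x + b*y + c`` a true signed perpendicular distance; the
# perpendicular-segment helper below relies on this.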
def compute_implicit_line(nodes):
"""Image for :func:`.hazmat.clipping.compute_implicit_line` docstring."""
if NO_IMAGES:
return
curve = bezier.Curve.from_nodes(nodes)
ax = curve.plot(256, color=BLUE)
min_x, max_x = nodes[0, (0, -1)]
min_y, max_y = nodes[1, (0, -1)]
ax.plot(
[min_x, max_x, max_x],
[min_y, min_y, max_y],
color="black",
linestyle="dashed",
)
ax.axis("scaled")
# NOTE: This "cheats" and assumes knowledge of what's actually in ``nodes``.
ax.set_xticks([0.0, 1.0, 2.0, 3.0, 4.0])
ax.set_yticks([0.0, 1.0, 2.0, 3.0])
info = clipping.compute_fat_line(nodes)
info = _normalize_implicit_line_tuple(info)
_plot_endpoints_line(ax, info, color="black")
save_image(ax.figure, "compute_implicit_line.png")
def _plot_fat_lines(ax, fat_line_coeffs, **fill_between_kwargs):
# NOTE: This assumes the x-limits have already been set for the axis.
coeff_a, coeff_b, coeff_c, d_low, d_high = fat_line_coeffs
if coeff_b == 0.0:
raise NotImplementedError("Vertical lines not supported")
min_x, max_x = ax.get_xlim()
coeff_c_low = coeff_c - d_low
coeff_c_high = coeff_c - d_high
# ax + by + c = 0 ==> y = -(ax + c)/b
min_y_low = -(coeff_a * min_x + coeff_c_low) / coeff_b
min_y_high = -(coeff_a * min_x + coeff_c_high) / coeff_b
max_y_low = -(coeff_a * max_x + coeff_c_low) / coeff_b
max_y_high = -(coeff_a * max_x + coeff_c_high) / coeff_b
ax.fill_between(
[min_x, max_x],
[min_y_low, max_y_low],
[min_y_high, max_y_high],
**fill_between_kwargs,
)
ax.set_xlim(min_x, max_x)
def _add_perpendicular_segments(ax, nodes, fat_line_coeffs, color):
coeff_a, coeff_b, coeff_c, _, _ = fat_line_coeffs
_, num_nodes = nodes.shape
for index in range(num_nodes):
# ax + by + c = 0 is perpendicular to lines of the form
# bx - ay = c'
curr_x, curr_y = nodes[:, index]
c_prime = coeff_b * curr_x - coeff_a * curr_y
# bx - ay = c' intersects ax + by + c = 0 at
# [x0, y0] = [b c' - a c, -a c' - b c] (assuming a^2 + b^2 == 1)
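        # (Derivation sketch, added for clarity: multiply the implicit line
        # by a and the perpendicular line by b, then add, to get
        # (a^2 + b^2) x = b c' - a c; multiply the implicit line by b and the
        # perpendicular line by a, then subtract, to get
        # (a^2 + b^2) y = -a c' - b c.)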
x_intersect = coeff_b * c_prime - coeff_a * coeff_c
y_intersect = -coeff_a * c_prime - coeff_b * coeff_c
ax.plot(
[curr_x, x_intersect],
[curr_y, y_intersect],
color=color,
linestyle="dashed",
)
def compute_fat_line(nodes, fat_line_coeffs):
"""Image for :func:`.hazmat.clipping.compute_fat_line` docstring."""
if NO_IMAGES:
return
fat_line_coeffs = _normalize_implicit_line_tuple(fat_line_coeffs)
curve = bezier.Curve.from_nodes(nodes)
ax = curve.plot(256, color=BLUE)
ax.plot(
nodes[0, :],
nodes[1, :],
marker="o",
color=BLUE,
linestyle="none",
)
_add_perpendicular_segments(ax, nodes, fat_line_coeffs, BLUE)
ax.axis("scaled")
_plot_endpoints_line(ax, fat_line_coeffs, color=BLUE, linestyle="dashed")
_plot_fat_lines(ax, fat_line_coeffs, color=BLUE, alpha=0.5)
save_image(ax.figure, "compute_fat_line.png")
def clip_range(nodes1, nodes2):
"""Image for :func:`.hazmat.clipping.clip_range` docstring."""
if NO_IMAGES:
return
curve1 = bezier.Curve.from_nodes(nodes1)
curve2 = bezier.Curve.from_nodes(nodes2)
# Plot both curves as well as the nodes.
ax = curve1.plot(256, color=BLUE)
curve2.plot(256, ax=ax, color=GREEN)
ax.plot(
nodes1[0, :],
nodes1[1, :],
marker="o",
color=BLUE,
linestyle="none",
)
ax.plot(
nodes2[0, :],
nodes2[1, :],
marker="o",
color=GREEN,
linestyle="none",
)
fat_line_coeffs = clipping.compute_fat_line(nodes1)
fat_line_coeffs = _normalize_implicit_line_tuple(fat_line_coeffs)
# Add perpendicular lines to the "implicit" line.
_add_perpendicular_segments(ax, nodes2, fat_line_coeffs, GREEN)
# Establish boundary **assuming** contents of ``nodes1`` and ``nodes2``.
ax.axis("scaled")
ax.set_xlim(-0.625, 7.375)
ax.set_ylim(-0.25, 4.625)
_plot_endpoints_line(ax, fat_line_coeffs, color=BLUE, linestyle="dashed")
_plot_fat_lines(ax, fat_line_coeffs, color=BLUE, alpha=0.5)
save_image(ax.figure, "clip_range.png")
def clip_range_distances(nodes1, nodes2):
"""Image for :func:`.hazmat.clipping.clip_range` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
fat_line_coeffs = clipping.compute_fat_line(nodes1)
coeff_a, coeff_b, coeff_c, d_min, d_max = fat_line_coeffs
degree2, polynomial = clipping._clip_range_polynomial(
nodes2, coeff_a, coeff_b, coeff_c
)
ax.fill_between([0.0, degree2], d_min, d_max, color=BLUE, alpha=0.25)
s_min, s_max = clipping.clip_range(nodes1, nodes2)
convex_hull = _helpers.simple_convex_hull(polynomial)
add_patch(
ax,
convex_hull,
GREEN,
with_nodes=True,
alpha=0.625,
node_color=GREEN,
)
# Plot the true distance function ``d(t)``.
t_values = np.linspace(0.0, 1.0, 257)
curve2 = bezier.Curve.from_nodes(nodes2)
evaluated = curve2.evaluate_multi(t_values)
x_values = degree2 * t_values
d_values = coeff_a * evaluated[0, :] + coeff_b * evaluated[1, :] + coeff_c
ax.plot(x_values, d_values, color=GREEN)
# Add dashed lines to each control point in the convex hull.
for index in range(degree2 + 1):
x_val, y_val = polynomial[:, index]
ax.plot([x_val, x_val], [0.0, y_val], color=GREEN, linestyle="dashed")
# NOTE: This "cheats" and uses the fact that it knows that ``s_min``
# corresponds to ``d_max`` and ``s_max`` corresponds to ``d_min``.
ax.plot(
[degree2 * s_min, degree2 * s_max],
[d_max, d_min],
color="black",
marker="o",
linestyle="none",
)
# Use minor xticks **above** for showing s_min and s_max.
jitter = 0.5 ** 5
# NOTE: We introduce ``jitter`` to avoid using the same value for a minor
# xtick that is used for a major one. When ``matplotlib`` sees a
# minor xtick at the exact same value used by a major xtick, it
# ignores the tick.
ax.set_xticks(
[degree2 * s_min + jitter, degree2 * s_max - jitter], minor=True
)
ax.set_xticklabels([f"$t = {s_min}$", f"$t = {s_max}$"], minor=True)
ax.tick_params(
axis="x",
which="minor",
direction="in",
top=False,
bottom=False,
labelbottom=False,
labeltop=True,
)
# Add line up to minor xticks. Similar to the dots on ``s_min`` and
# ``s_max`` this "cheats" with the correspondence to ``d_min`` / ``d_max``.
min_y, max_y = ax.get_ylim()
ax.plot(
[degree2 * s_min, degree2 * s_min],
[d_max, max_y],
color="black",
alpha=0.125,
linestyle="dashed",
)
ax.plot(
[degree2 * s_max, degree2 * s_max],
[d_min, max_y],
color="black",
alpha=0.125,
linestyle="dashed",
)
ax.set_ylim(min_y, max_y)
ax.set_xlabel("$2t$")
ax.set_ylabel("$d(t)$", rotation=0)
save_image(figure, "clip_range_distances.png")
def main():
bounding_box_predicate()
convex_hull_predicate()
subdivide_curve()
subdivision_process()
subdivision_pruning()
if __name__ == "__main__":
main()
|
apache-2.0
|
michaelshin/deepCellVision
|
cellVision/image_handler.py
|
1
|
4792
|
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
from django.conf import settings
try:
# faster implementation using bindings to libxml
from lxml import etree as ET
except ImportError:
print 'Falling back to default ElementTree implementation'
from xml.etree import ElementTree as ET
import re
RE_NAME_LONG = re.compile(r'^(\d+)_Exp(\d+)Cam(\d+)$')
RE_NAME = re.compile(r'^Exp(\d+)Cam(\d+)$')
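# Hypothetical array names the patterns above are meant to match:
#   RE_NAME_LONG: '003_Exp1Cam2' -> groups ('003', '1', '2')
#   RE_NAME:      'Exp1Cam2'     -> groups ('1', '2')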
def save(path, ext='png', close=True, verbose=False):
    """Save a figure from pyplot.
Parameters
----------
path : string
The path (and filename, without the extension) to save the
figure to.
ext : string (default='png')
The file extension. This must be supported by the active
matplotlib backend (see matplotlib.backends module). Most
backends support 'png', 'pdf', 'ps', 'eps', and 'svg'.
close : boolean (default=True)
Whether to close the figure after saving. If you want to save
the figure multiple times (e.g., to multiple formats), you
should NOT close it in between saves or you will have to
re-plot it.
    verbose : boolean (default=False)
Whether to print information about when and where the image
        has been saved.
    """
    import os
# Extract the directory and filename from the given path
directory = os.path.split(path)[0]
filename = "%s.%s" % (os.path.split(path)[1], ext)
if directory == '':
directory = '.'
# If the directory does not exist, create it
if not os.path.exists(directory):
os.makedirs(directory)
# The final path to save to
savepath = os.path.join(directory, filename)
if verbose:
print("Saving figure to '%s'..." % savepath),
# Actually save the figure
plt.savefig(savepath, bbox_inches='tight', pad_inches = 0)
# Close it
if close:
plt.close()
if verbose:
print("Done")
return savepath
def _segment(cell):
    # Takes a numpy array of a microscopy image and segments it by filtering
    # the image, then applying a distance transform and a watershed method to
    # get the proper segmentation.
import mahotas as mh
filt_cell = mh.gaussian_filter(cell, 2)
T = mh.thresholding.otsu((np.rint(filt_cell).astype('uint8')))
dist = mh.stretch(mh.distance(filt_cell > T))
Bc = np.ones((3,3))
rmax = mh.regmin((dist))
rmax = np.invert(rmax)
labels, num_cells = mh.label(rmax, Bc)
surface = (dist.max() - dist)
areas = mh.cwatershed(dist, labels)
areas *= T
return areas
def show_segment(path, name):
from PIL import Image
img = Image.open(path).convert('L')
arr = np.asarray(img, np.uint8)
arr = _segment(arr)
plt.imshow(arr)
loc = str(settings.MEDIA_ROOT + '/segment/' + name.split('.')[0])
save(loc)
np.save(loc, arr) #save the array to give as a raw file
return
def _parse_xml(x):
frames = cams = exps = arrays = 0
for n in [e.get('Name') for e in ET.fromstring(x).findall('Arrays/Array[@Name]')]:
# Print "n" if you want to see the values of the "Name" attribute
# print n
arrays += 1
# Names found in Oren's flex files
m = RE_NAME_LONG.match(n)
if m:
frames, exps, cams = [max(g) for g in zip(map(int, m.groups()), (frames, exps, cams))]
continue
# Names found in Mojca's flex files
m = RE_NAME.match(n)
if m:
exps, cams = [max(g) for g in zip(map(int, m.groups()), (exps, cams))]
frames = arrays / cams
continue
raise Exception('Unknown flex name pattern')
return frames, exps, cams
def _r(fp):
'''
Read one byte as char and return byte value
'''
return ord(fp.read(1))
'''
type reading utils
'''
def _get_short(fp):
return _r(fp) + (_r(fp) << 8)
def _get_int(fp):
return _r(fp) + (_r(fp) << 8) + (_r(fp) << 16) + (_r(fp) << 24)
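# The helpers above decode little-endian integers byte by byte; e.g. the byte
# sequence 0x34 0x12 decodes to 0x1234 via ``_get_short``.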
def get_flex_data(im):
im = open(im, 'rb')
_mm = im.read(2)
_ver = _get_short(im)
_offs = _get_int(im)
im.seek(_offs)
_num_tags = _get_short(im)
xml = None
for _tag_idx in xrange(_num_tags):
_tag = _get_short(im)
_tag_type = _get_short(im)
_tag_len = _get_int(im)
if _tag_type == 3 and _tag_len == 1:
_tag_value = _get_short(im)
_ = _get_short(im)
else:
_tag_value = _get_int(im)
if _tag == 65200:
_saved_offs = im.tell()
im.seek(_tag_value)
xml = im.read(_tag_len)
im.seek(_saved_offs)
im.close()
return _parse_xml(xml)
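# Hedged usage sketch (the path below is a placeholder, not a real fixture):
#     frames, exps, cams = get_flex_data('/data/example.flex')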
|
mit
|
ryfeus/lambda-packs
|
Sklearn_scipy_numpy/source/sklearn/metrics/cluster/tests/test_unsupervised.py
|
230
|
2823
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
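# Example (sketch): typical silhouette_score usage outside these tests, scoring
# k-means labels on the iris data. KMeans is not imported above; the snippet is
# illustrative only.
#
#     from sklearn.cluster import KMeans
#     X = datasets.load_iris().data
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     score = silhouette_score(X, labels, metric='euclidean')  # value in (-1, 1]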
|
mit
|
kevin-coder/tensorflow-fork
|
tensorflow/contrib/gan/python/estimator/python/stargan_estimator_test.py
|
7
|
12092
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's stargan_estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def dummy_generator_fn(input_data, input_data_domain_label, mode):
del input_data_domain_label, mode
return variable_scope.get_variable('dummy_g', initializer=0.5) * input_data
def dummy_discriminator_fn(input_data, num_domains, mode):
del mode
hidden = layers.flatten(input_data)
output_src = math_ops.reduce_mean(hidden, axis=1)
output_cls = layers.fully_connected(
inputs=hidden, num_outputs=num_domains, scope='debug')
return output_src, output_cls
class StarGetGANModelTest(test.TestCase, parameterized.TestCase):
"""Tests that `StarGetGANModel` produces the correct model."""
@parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_gan_model(self, mode):
with ops.Graph().as_default():
input_data = array_ops.ones([6, 4, 4, 3])
input_data_domain_label = array_ops.one_hot([0] * 6, 5)
gan_model = estimator._get_gan_model(
mode,
dummy_generator_fn,
dummy_discriminator_fn,
input_data,
input_data_domain_label,
add_summaries=False)
self.assertEqual(input_data, gan_model.input_data)
self.assertIsNotNone(gan_model.generated_data)
self.assertIsNotNone(gan_model.generated_data_domain_target)
self.assertLen(gan_model.generator_variables, 1)
self.assertIsNotNone(gan_model.generator_scope)
self.assertIsNotNone(gan_model.generator_fn)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertIsNone(gan_model.input_data_domain_label)
self.assertEqual(input_data_domain_label,
gan_model.generated_data_domain_target)
self.assertIsNone(gan_model.reconstructed_data)
self.assertIsNone(gan_model.discriminator_input_data_source_predication)
self.assertIsNone(
gan_model.discriminator_generated_data_source_predication)
self.assertIsNone(gan_model.discriminator_input_data_domain_predication)
self.assertIsNone(
gan_model.discriminator_generated_data_domain_predication)
self.assertIsNone(gan_model.discriminator_variables)
self.assertIsNone(gan_model.discriminator_scope)
self.assertIsNone(gan_model.discriminator_fn)
else:
self.assertEqual(input_data_domain_label,
gan_model.input_data_domain_label)
self.assertIsNotNone(gan_model.reconstructed_data.shape)
self.assertIsNotNone(
gan_model.discriminator_input_data_source_predication)
self.assertIsNotNone(
gan_model.discriminator_generated_data_source_predication)
self.assertIsNotNone(
gan_model.discriminator_input_data_domain_predication)
self.assertIsNotNone(
gan_model.discriminator_generated_data_domain_predication)
self.assertLen(gan_model.discriminator_variables, 2) # 1 FC layer
self.assertIsNotNone(gan_model.discriminator_scope)
self.assertIsNotNone(gan_model.discriminator_fn)
def get_dummy_gan_model():
"""Similar to get_gan_model()."""
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.StarGANModel(
input_data=array_ops.ones([1, 2, 2, 3]),
input_data_domain_label=array_ops.ones([1, 2]),
generated_data=array_ops.ones([1, 2, 2, 3]),
generated_data_domain_target=array_ops.ones([1, 2]),
reconstructed_data=array_ops.ones([1, 2, 2, 3]),
discriminator_input_data_source_predication=array_ops.ones([1]) * dis_var,
discriminator_generated_data_source_predication=array_ops.ones(
[1]) * gen_var * dis_var,
discriminator_input_data_domain_predication=array_ops.ones([1, 2
]) * dis_var,
discriminator_generated_data_domain_predication=array_ops.ones([1, 2]) *
gen_var * dis_var,
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def dummy_loss_fn(gan_model):
loss = math_ops.reduce_sum(
gan_model.discriminator_input_data_domain_predication -
gan_model.discriminator_generated_data_domain_predication)
loss += math_ops.reduce_sum(gan_model.input_data - gan_model.generated_data)
return tfgan_tuples.GANLoss(loss, loss)
def get_metrics(gan_model):
return {
'mse_custom_metric':
metrics_lib.mean_squared_error(gan_model.input_data,
gan_model.generated_data)
}
class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
super(GetEstimatorSpecTest, cls).setUpClass()
cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)
@parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_estimator_spec(self, mode):
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
spec = estimator._get_estimator_spec(
mode,
self._gan_model,
loss_fn=dummy_loss_fn,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=self._generator_optimizer,
discriminator_optimizer=self._discriminator_optimizer)
self.assertEqual(mode, spec.mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
elif mode == model_fn_lib.ModeKeys.TRAIN:
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
elif mode == model_fn_lib.ModeKeys.EVAL:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metric_ops)
# TODO(joelshor): Add pandas test.
class StarGANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self,
train_input_fn,
eval_input_fn,
predict_input_fn,
prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.StarGANEstimator(
generator_fn=dummy_generator_fn,
discriminator_fn=dummy_discriminator_fn,
loss_fn=dummy_loss_fn,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
get_eval_metric_ops_fn=get_metrics,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', scores)
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', scores)
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
@staticmethod
def _numpy_input_fn_wrapper(numpy_input_fn, batch_size, label_size):
"""Wrapper to remove the dictionary in numpy_input_fn.
NOTE:
We create the domain_label here because the model expects a fully defined
batch_size from the input.
Args:
numpy_input_fn: input_fn created from numpy_io
batch_size: (int) number of items for each batch
label_size: (int) number of domains
Returns:
a new input_fn
"""
def new_input_fn():
features = numpy_input_fn()
return features['x'], array_ops.one_hot([0] * batch_size, label_size)
return new_input_fn
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
batch_size = 5
img_size = 8
channel_size = 3
label_size = 3
image_data = np.zeros(
[batch_size, img_size, img_size, channel_size], dtype=np.float32)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': image_data},
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': image_data}, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': image_data}, shuffle=False)
train_input_fn = self._numpy_input_fn_wrapper(train_input_fn, batch_size,
label_size)
eval_input_fn = self._numpy_input_fn_wrapper(eval_input_fn, batch_size,
label_size)
predict_input_fn = self._numpy_input_fn_wrapper(predict_input_fn,
batch_size, label_size)
predict_input_fn = estimator.stargan_prediction_input_fn_wrapper(
predict_input_fn)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, img_size, img_size, channel_size])
if __name__ == '__main__':
test.main()
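# Example (sketch): outside the tests, StarGANEstimator is wired up the same way
# as in _test_complete_flow() above -- generator/discriminator/loss functions
# plus one optimizer per network. The names below are the dummies defined in
# this file; train_input_fn would be built as in test_numpy_input_fn().
#
#     est = estimator.StarGANEstimator(
#         generator_fn=dummy_generator_fn,
#         discriminator_fn=dummy_discriminator_fn,
#         loss_fn=dummy_loss_fn,
#         generator_optimizer=training.GradientDescentOptimizer(1.0),
#         discriminator_optimizer=training.GradientDescentOptimizer(1.0))
#     est.train(train_input_fn, steps=10)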
|
apache-2.0
|
etorre/elephant
|
elephant/spectral.py
|
4
|
20700
|
# -*- coding: utf-8 -*-
"""
Identification of spectral properties in analog signals (e.g., the power
spectrum).
:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import warnings
import numpy as np
import scipy.signal
import scipy.fftpack as fftpack
import scipy.signal.signaltools as signaltools
from scipy.signal.windows import get_window
from six import string_types
import quantities as pq
import neo
def _welch(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', scaling='density', axis=-1):
"""
A helper function to estimate cross spectral density using Welch's method.
This function is a slightly modified version of `scipy.signal.welch()` with
modifications based on `matplotlib.mlab._spectral_helper()`.
Welch's method [1]_ computes an estimate of the cross spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the cross-periodograms.
Parameters
----------
x, y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series in units of Hz.
Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
Defaults to 'constant'.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross spectrum of x and y.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
"""
# TODO: This function should be replaced by `scipy.signal.csd()`, which
# will appear in SciPy 0.16.0.
# The checks for if y is x are so that we can use the same function to
# obtain both power spectrum and cross spectrum without doing extra
# calculations.
same_data = y is x
# Make sure we're dealing with a numpy array. If y and x were the same
# object to start with, keep them that way
x = np.asarray(x)
if same_data:
y = x
else:
if x.shape != y.shape:
raise ValueError("x and y must be of the same shape.")
y = np.asarray(y)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data:
y = np.rollaxis(y, axis, len(y.shape))
if x.shape[-1] < nperseg:
warnings.warn('nperseg = %d, is greater than x.shape[%d] = %d, using '
'nperseg = x.shape[%d]'
% (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
if scaling == 'density':
scale = 1.0 / (fs * (win * win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1] - nperseg + 1, step)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind + nperseg])
xft = fftpack.fft(x_dt * win, nfft)
if same_data:
yft = xft
else:
y_dt = detrend_func(y[..., ind:ind + nperseg])
yft = fftpack.fft(y_dt * win, nfft)
if k == 0:
Pxy = (xft * yft.conj())
else:
Pxy *= k / (k + 1.0)
Pxy += (xft * yft.conj()) / (k + 1.0)
Pxy *= scale
f = fftpack.fftfreq(nfft, 1.0 / fs)
if axis != -1:
Pxy = np.rollaxis(Pxy, -1, axis)
return f, Pxy
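# Example (sketch): cross-spectrum of two noisy signals that share a 20 Hz
# component, sampled at 1 kHz. Variable names are illustrative only.
#
#     t = np.arange(0, 10, 0.001)
#     x = np.sin(2 * np.pi * 20 * t) + np.random.randn(t.size)
#     y = np.sin(2 * np.pi * 20 * t + 0.5) + np.random.randn(t.size)
#     f, Pxy = _welch(x, y, fs=1000.0, nperseg=1024)
#     # |Pxy| peaks near +/-20 Hz; f is two-sided (fftpack.fftfreq ordering)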
def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5,
fs=1.0, window='hanning', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimates the power spectral density (PSD) of a given AnalogSignal using
Welch's method, which works in the following steps:
1. cut the given data into several overlapping segments. The degree of
overlap can be specified by parameter *overlap* (default is 0.5,
i.e. segments overlap by half of their length).
The number and the length of the segments are determined according
to parameter *num_seg*, *len_seg* or *freq_res*. By default, the
data is cut into 8 segments.
2. apply a window function to each segment. Hanning window is used by
default. This can be changed by giving a window function or an
array as parameter *window* (for details, see the docstring of
`scipy.signal.welch()`)
3. compute the periodogram of each segment
4. average the obtained periodograms to yield PSD estimate
These steps are implemented in `scipy.signal`, and this function is a
wrapper which provides a proper set of parameters to
`scipy.signal.welch()`. Some parameters for scipy.signal.welch(), such as
`nfft`, `detrend`, `window`, `return_onesided` and `scaling`, also work
for this function.
Parameters
----------
signal: Neo AnalogSignal or Quantity array or Numpy ndarray
Time series data, of which PSD is estimated. When a Quantity array or
Numpy ndarray is given, sampling frequency should be given through the
keyword argument `fs`, otherwise the default value (`fs=1.0`) is used.
num_seg: int, optional
Number of segments. The length of segments is adjusted so that
overlapping segments cover the entire stretch of the given data. This
parameter is ignored if *len_seg* or *freq_res* is given. Default is 8.
len_seg: int, optional
Length of segments. This parameter is ignored if *freq_res* is given.
Default is None (determined from other parameters).
freq_res: Quantity or float, optional
Desired frequency resolution of the obtained PSD estimate in terms of
the interval between adjacent frequency bins. When given as a float, it
is taken as frequency in Hz. Default is None (determined from other
parameters).
overlap: float, optional
Overlap between segments represented as a float number between 0 (no
overlap) and 1 (complete overlap). Default is 0.5 (half-overlapped).
fs: Quantity array or float, optional
Specifies the sampling frequency of the input time series. When the
input is given as an AnalogSignal, the sampling frequency is taken
from its attribute and this parameter is ignored. Default is 1.0.
window, nfft, detrend, return_onesided, scaling, axis: optional
These arguments are directly passed on to scipy.signal.welch(). See the
respective descriptions in the docstring of `scipy.signal.welch()` for
usage.
Returns
-------
freqs: Quantity array or Numpy ndarray
Frequencies associated with the power estimates in `psd`. `freqs` is
always a 1-dimensional array irrespective of the shape of the input
data. Quantity array is returned if `signal` is AnalogSignal or
Quantity array. Otherwise Numpy ndarray containing frequency in Hz is
returned.
psd: Quantity array or Numpy ndarray
PSD estimates of the time series in `signal`. Quantity array is
returned if `signal` is AnalogSignal or Quantity array. Otherwise
Numpy ndarray is returned.
"""
# initialize a parameter dict (to be given to scipy.signal.welch()) with
# the parameters directly passed on to scipy.signal.welch()
params = {'window': window, 'nfft': nfft,
'detrend': detrend, 'return_onesided': return_onesided,
'scaling': scaling, 'axis': axis}
# add the input data to params. When the input is AnalogSignal, the
# data is added after rolling the axis for time index to the last
data = np.asarray(signal)
if isinstance(signal, neo.AnalogSignal):
data = np.rollaxis(data, 0, len(data.shape))
params['x'] = data
# if the data is given as AnalogSignal, use its attribute to specify
# the sampling frequency
if hasattr(signal, 'sampling_rate'):
params['fs'] = signal.sampling_rate.rescale('Hz').magnitude
else:
params['fs'] = fs
if overlap < 0:
raise ValueError("overlap must be greater than or equal to 0")
elif 1 <= overlap:
raise ValueError("overlap must be less then 1")
# determine the length of segments (i.e. *nperseg*) according to given
# parameters
if freq_res is not None:
if freq_res <= 0:
raise ValueError("freq_res must be positive")
dF = freq_res.rescale('Hz').magnitude \
if isinstance(freq_res, pq.quantity.Quantity) else freq_res
nperseg = int(params['fs'] / dF)
if nperseg > data.shape[axis]:
raise ValueError("freq_res is too high for the given data size")
elif len_seg is not None:
if len_seg <= 0:
raise ValueError("len_seg must be a positive number")
elif data.shape[axis] < len_seg:
raise ValueError("len_seg must be shorter than the data length")
nperseg = len_seg
else:
if num_seg <= 0:
raise ValueError("num_seg must be a positive number")
elif data.shape[axis] < num_seg:
raise ValueError("num_seg must be smaller than the data length")
# when only *num_seg* is given, *nperseg* is determined by solving the
# following equation:
# num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1]
# ----------------- =============================== ^^^^^^^^^^^
# summed segment lengths total overlap data length
nperseg = int(data.shape[axis] / (num_seg - overlap * (num_seg - 1)))
params['nperseg'] = nperseg
params['noverlap'] = int(nperseg * overlap)
freqs, psd = scipy.signal.welch(**params)
# attach proper units to return values
if isinstance(signal, pq.quantity.Quantity):
if 'scaling' in params and params['scaling'] == 'spectrum':
psd = psd * signal.units * signal.units
else:
psd = psd * signal.units * signal.units / pq.Hz
freqs = freqs * pq.Hz
return freqs, psd
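# Example (sketch): PSD of a noisy 30 Hz sine passed as a plain NumPy array
# sampled at 1 kHz; for an AnalogSignal input, `fs` is taken from the signal
# itself and Quantity arrays are returned instead.
#
#     t = np.arange(0, 10, 0.001)
#     x = np.sin(2 * np.pi * 30 * t) + np.random.randn(t.size)
#     freqs, psd = welch_psd(x, fs=1000.0, freq_res=1.0)
#     # freqs[np.argmax(psd)] should be close to 30 (Hz)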
def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5,
fs=1.0, window='hanning', nfft=None, detrend='constant',
scaling='density', axis=-1):
"""
Estimates coherence between a given pair of analog signals. The estimation
is performed with Welch's method: the given pair of data are cut into short
segments, cross-spectra are calculated for each pair of segments, and the
cross-spectra are averaged and normalized by respective auto_spectra. By
default the data are cut into 8 segments with 50% overlap between
neighboring segments. These numbers can be changed through respective
parameters.
Parameters
----------
x, y: Neo AnalogSignal or Quantity array or Numpy ndarray
A pair of time series data, between which coherence is computed. The
shapes and the sampling frequencies of `x` and `y` must be identical.
When `x` and `y` are not of AnalogSignal, sampling frequency
should be specified through the keyword argument `fs`, otherwise the
default value (`fs=1.0`) is used.
num_seg: int, optional
Number of segments. The length of segments is adjusted so that
overlapping segments cover the entire stretch of the given data. This
parameter is ignored if *len_seg* or *freq_res* is given. Default is 8.
len_seg: int, optional
Length of segments. This parameter is ignored if *freq_res* is given.
Default is None (determined from other parameters).
freq_res: Quantity or float, optional
Desired frequency resolution of the obtained coherence estimate in
terms of the interval between adjacent frequency bins. When given as a
float, it is taken as frequency in Hz. Default is None (determined from
other parameters).
overlap: float, optional
Overlap between segments represented as a float number between 0 (no
overlap) and 1 (complete overlap). Default is 0.5 (half-overlapped).
fs: Quantity array or float, optional
Specifies the sampling frequency of the input time series. When the
input time series are given as AnalogSignal, the sampling
frequency is taken from their attribute and this parameter is ignored.
Default is 1.0.
window, nfft, detrend, scaling, axis: optional
These arguments are directly passed on to a helper function
`elephant.spectral._welch()`. See the respective descriptions in the
docstring of `elephant.spectral._welch()` for usage.
Returns
-------
freqs: Quantity array or Numpy ndarray
Frequencies associated with the estimates of coherency and phase lag.
`freqs` is always a 1-dimensional array irrespective of the shape of
the input data. Quantity array is returned if `x` and `y` are of
AnalogSignal or Quantity array. Otherwise Numpy ndarray containing
frequency in Hz is returned.
coherency: Numpy ndarray
Estimate of coherency between the input time series. For each frequency
coherency takes a value between 0 and 1, with 0 or 1 representing no or
perfect coherence, respectively. When the input arrays `x` and `y` are
multi-dimensional, `coherency` is of the same shape as the inputs and
frequency is indexed along either the first or the last axis depending
on the type of the input: when the input is AnalogSignal, the
first axis indexes frequency, otherwise the last axis does.
phase_lag: Quantity array or Numpy ndarray
Estimate of phase lag in radian between the input time series. For each
frequency phase lag takes a value between -PI and PI, positive values
meaning phase precession of `x` ahead of `y` and vice versa. Quantity
array is returned if `x` and `y` are of AnalogSignal or Quantity
array. Otherwise Numpy ndarray containing phase lag in radian is
returned. The axis for frequency index is determined in the same way as
for `coherency`.
"""
# initialize a parameter dict (to be given to _welch()) with
# the parameters directly passed on to _welch()
params = {'window': window, 'nfft': nfft,
'detrend': detrend, 'scaling': scaling, 'axis': axis}
# When the input is AnalogSignal, the axis for time index is rolled to
# the last
xdata = np.asarray(x)
ydata = np.asarray(y)
if isinstance(x, neo.AnalogSignal):
xdata = np.rollaxis(xdata, 0, len(xdata.shape))
ydata = np.rollaxis(ydata, 0, len(ydata.shape))
# if the data is given as AnalogSignal, use its attribute to specify
# the sampling frequency
if hasattr(x, 'sampling_rate'):
params['fs'] = x.sampling_rate.rescale('Hz').magnitude
else:
params['fs'] = fs
if overlap < 0:
raise ValueError("overlap must be greater than or equal to 0")
elif 1 <= overlap:
raise ValueError("overlap must be less then 1")
# determine the length of segments (i.e. *nperseg*) according to given
# parameters
if freq_res is not None:
if freq_res <= 0:
raise ValueError("freq_res must be positive")
dF = freq_res.rescale('Hz').magnitude \
if isinstance(freq_res, pq.quantity.Quantity) else freq_res
nperseg = int(params['fs'] / dF)
if nperseg > xdata.shape[axis]:
raise ValueError("freq_res is too high for the given data size")
elif len_seg is not None:
if len_seg <= 0:
raise ValueError("len_seg must be a positive number")
elif xdata.shape[axis] < len_seg:
raise ValueError("len_seg must be shorter than the data length")
nperseg = len_seg
else:
if num_seg <= 0:
raise ValueError("num_seg must be a positive number")
elif xdata.shape[axis] < num_seg:
raise ValueError("num_seg must be smaller than the data length")
# when only *num_seg* is given, *nperseg* is determined by solving the
# following equation:
# num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1]
# ----------------- =============================== ^^^^^^^^^^^
# summed segment lengths total overlap data length
nperseg = int(xdata.shape[axis] / (num_seg - overlap * (num_seg - 1)))
params['nperseg'] = nperseg
params['noverlap'] = int(nperseg * overlap)
freqs, Pxy = _welch(xdata, ydata, **params)
freqs, Pxx = _welch(xdata, xdata, **params)
freqs, Pyy = _welch(ydata, ydata, **params)
coherency = np.abs(Pxy)**2 / (np.abs(Pxx) * np.abs(Pyy))
phase_lag = np.angle(Pxy)
# attach proper units to return values
if isinstance(x, pq.quantity.Quantity):
freqs = freqs * pq.Hz
phase_lag = phase_lag * pq.rad
# When the input is AnalogSignal, the axis for frequency index is
# rolled to the first to comply with the Neo convention about time axis
if isinstance(x, neo.AnalogSignal):
coherency = np.rollaxis(coherency, -1)
phase_lag = np.rollaxis(phase_lag, -1)
return freqs, coherency, phase_lag
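# Example (sketch): coherence between two noisy copies of the same 15 Hz sine,
# passed as plain NumPy arrays sampled at 500 Hz. Variable names are
# illustrative only.
#
#     t = np.arange(0, 20, 0.002)
#     s = np.sin(2 * np.pi * 15 * t)
#     x = s + 0.5 * np.random.randn(t.size)
#     y = s + 0.5 * np.random.randn(t.size)
#     freqs, coh, phase = welch_cohere(x, y, fs=500.0, num_seg=16)
#     # coh approaches 1 around 15 Hz and phase is close to 0 there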
|
bsd-3-clause
|
NSLS-II-SRX/ipython_ophyd
|
profile_srx_user-v1/ipython_qtconsole_config.py
|
13
|
24674
|
# Configuration file for ipython-qtconsole.
c = get_config()
#------------------------------------------------------------------------------
# IPythonQtConsoleApp configuration
#------------------------------------------------------------------------------
# IPythonQtConsoleApp will inherit config from: BaseIPythonApplication,
# Application, IPythonConsoleApp, ConnectionFileMixin
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPythonQtConsoleApp.ip = u''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPythonQtConsoleApp.verbose_crash = False
# Start the console window maximized.
# c.IPythonQtConsoleApp.maximize = False
# The date format used by logging formatters for %(asctime)s
# c.IPythonQtConsoleApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPythonQtConsoleApp.shell_port = 0
# The SSH server to use to connect to the kernel.
# c.IPythonQtConsoleApp.sshserver = ''
# set the stdin (DEALER) port [default: random]
# c.IPythonQtConsoleApp.stdin_port = 0
# Set the log level by value or name.
# c.IPythonQtConsoleApp.log_level = 30
# Path to the ssh key to use for logging in to the ssh server.
# c.IPythonQtConsoleApp.sshkey = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPythonQtConsoleApp.extra_config_file = u''
# Whether to create profile dir if it doesn't exist
# c.IPythonQtConsoleApp.auto_create = False
# path to a custom CSS stylesheet
# c.IPythonQtConsoleApp.stylesheet = ''
# set the heartbeat port [default: random]
# c.IPythonQtConsoleApp.hb_port = 0
# Whether to overwrite existing config files when copying
# c.IPythonQtConsoleApp.overwrite = False
# set the iopub (PUB) port [default: random]
# c.IPythonQtConsoleApp.iopub_port = 0
# The IPython profile to use.
# c.IPythonQtConsoleApp.profile = u'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPythonQtConsoleApp.connection_file = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.IPythonQtConsoleApp.confirm_exit = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPythonQtConsoleApp.ipython_dir = u''
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPythonQtConsoleApp.copy_config_files = False
# Connect to an already running kernel
# c.IPythonQtConsoleApp.existing = ''
# Use a plaintext widget instead of rich text (plain can't print/save).
# c.IPythonQtConsoleApp.plain = False
# Start the console window with the menu bar hidden.
# c.IPythonQtConsoleApp.hide_menubar = False
# The Logging format template
# c.IPythonQtConsoleApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#
# c.IPythonQtConsoleApp.transport = 'tcp'
#------------------------------------------------------------------------------
# IPythonWidget configuration
#------------------------------------------------------------------------------
# A FrontendWidget for an IPython kernel.
# IPythonWidget will inherit config from: FrontendWidget, HistoryConsoleWidget,
# ConsoleWidget
# The type of completer to use. Valid values are:
#
# 'plain' : Show the available completion as a text list
# Below the editing area.
# 'droplist': Show the completion in a drop down list navigable
# by the arrow keys, and from which you can select
# completion by pressing Return.
# 'ncurses' : Show the completion as a text list which is navigable by
# `tab` and arrow keys.
# c.IPythonWidget.gui_completion = 'ncurses'
# Whether to process ANSI escape codes.
# c.IPythonWidget.ansi_codes = True
# A CSS stylesheet. The stylesheet can contain classes for:
# 1. Qt: QPlainTextEdit, QFrame, QWidget, etc
# 2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
# 3. IPython: .error, .in-prompt, .out-prompt, etc
# c.IPythonWidget.style_sheet = u''
# The height of the console at start time in number of characters (will double
# with `vsplit` paging)
# c.IPythonWidget.height = 25
#
# c.IPythonWidget.out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
#
# c.IPythonWidget.input_sep = '\n'
# Whether to draw information calltips on open-parentheses.
# c.IPythonWidget.enable_calltips = True
#
# c.IPythonWidget.in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
# The width of the console at start time in number of characters (will double
# with `hsplit` paging)
# c.IPythonWidget.width = 81
# A command for invoking a system text editor. If the string contains a
# {filename} format specifier, it will be used. Otherwise, the filename will be
# appended to the end of the command.
# c.IPythonWidget.editor = ''
# If not empty, use this Pygments style for syntax highlighting. Otherwise, the
# style sheet is queried for Pygments style information.
# c.IPythonWidget.syntax_style = u''
# The font family to use for the console. On OSX this defaults to Monaco, on
# Windows the default is Consolas with fallback of Courier, and on other
# platforms the default is Monospace.
# c.IPythonWidget.font_family = u''
# The pygments lexer class to use.
# c.IPythonWidget.lexer_class = <IPython.utils.traitlets.Undefined object at 0x1866810>
#
# c.IPythonWidget.output_sep2 = ''
# Whether to automatically execute on syntactically complete input.
#
# If False, Shift-Enter is required to submit each execution. Disabling this is
# mainly useful for non-Python kernels, where the completion check would be
# wrong.
# c.IPythonWidget.execute_on_complete_input = True
# The maximum number of lines of text before truncation. Specifying a non-
# positive number disables text truncation (not recommended).
# c.IPythonWidget.buffer_size = 500
#
# c.IPythonWidget.history_lock = False
#
# c.IPythonWidget.banner = u''
# The type of underlying text widget to use. Valid values are 'plain', which
# specifies a QPlainTextEdit, and 'rich', which specifies a QTextEdit.
# c.IPythonWidget.kind = 'plain'
# Whether to ask for user confirmation when restarting kernel
# c.IPythonWidget.confirm_restart = True
# The font size. If unconfigured, Qt will be entrusted with the size of the
# font.
# c.IPythonWidget.font_size = 0
# The editor command to use when a specific line number is requested. The string
# should contain two format specifiers: {line} and {filename}. If this parameter
# is not specified, the line number option to the %edit magic will be ignored.
# c.IPythonWidget.editor_line = u''
# Whether to clear the console when the kernel is restarted
# c.IPythonWidget.clear_on_kernel_restart = True
# The type of paging to use. Valid values are:
#
# 'inside'
# The widget pages like a traditional terminal.
# 'hsplit'
# When paging is requested, the widget is split horizontally. The top
# pane contains the console, and the bottom pane contains the paged text.
# 'vsplit'
# Similar to 'hsplit', except that a vertical splitter is used.
# 'custom'
# No action is taken by the widget beyond emitting a
# 'custom_page_requested(str)' signal.
# 'none'
# The text is written directly to the console.
# c.IPythonWidget.paging = 'inside'
#
# c.IPythonWidget.output_sep = ''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'swilkins'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
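# Example overrides (a sketch; uncomment and adjust to taste). These reuse
# option names that are documented above rather than introducing new settings:
# c.IPythonWidget.gui_completion = 'droplist'
# c.IPythonWidget.font_size = 11
# c.IPythonQtConsoleApp.confirm_exit = False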
|
bsd-2-clause
|
dpaiton/OpenPV
|
pv-core/analysis/python/plot_l1_activity.py
|
1
|
1294
|
"""
Plot the highest activity of four different bar positionings
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadSparse as rs
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
extended = False
a1 = rs.PVReadSparse(sys.argv[1], extended)
end = int(sys.argv[2])
numofsteps = int(sys.argv[3])
nx = a1.nx
ny = a1.ny
numneur = nx * ny
activity = []
count = 0
counta = 0
for k in range(end):
A=a1.next_activity()
#print "sum = ", np.sum(A)
d = k / numofsteps
#act = np.append(activity, np.sum(A))
act = np.sum(A)
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A1p = act
#print "k at first = ", k
else:
A1p = np.vstack((A1p,act))
if k == (numofsteps-1):
A1q = 0 #A1p.sum(axis=0)
#print A1q
if k == ((numofsteps*d) + (numofsteps-1)): #and k != (numofsteps-1):
A1q = np.vstack((A1q, A1p.sum(axis=0)))
#print A1q
t1 = A1q / float(numneur)
#print t1
t1 = t1 / (numofsteps / 2000.0)
#print t1
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.arange(np.shape(t1)[0]), t1, color='y', ls = '-')
plt.show()
sys.exit()
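# Example invocation (hypothetical file name), inferred from the sys.argv usage
# above: argv[1] is a sparse activity file, argv[2] the number of activity
# frames to read and argv[3] the number of frames averaged into each plotted bin.
#
#     python plot_l1_activity.py a1.pvp 2000 100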
|
epl-1.0
|
jiajunshen/partsNet
|
scripts/visualizeWheretoCodeParts.py
|
1
|
7169
|
from __future__ import division, print_function,absolute_import
import pylab as plt
import amitgroup.plot as gr
import numpy as np
import amitgroup as ag
import os
import pnet
import matplotlib.pylab as plot
from pnet.cyfuncs import index_map_pooling
from queue import Queue
def extract(ims,allLayers):
#print(allLayers)
curX = ims
for layer in allLayers:
#print('-------------')
#print(layer)
curX = layer.extract(curX)
#print(np.array(curX).shape)
#print('------------------')
return curX
def partsPool(originalPartsRegion, numParts):
partsGrid = np.zeros((1,1,numParts))
for i in range(originalPartsRegion.shape[0]):
for j in range(originalPartsRegion.shape[1]):
if(originalPartsRegion[i,j]!=-1):
partsGrid[0,0,originalPartsRegion[i,j]] = 1
return partsGrid
def test(ims,labels,net):
yhat = net.classify((ims,2000))
return yhat == labels
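# Example (sketch): partsPool() collapses a window of coded part indices into a
# 1x1xnumParts indicator grid, ignoring -1 entries (positions with no part).
#
#     region = np.array([[0, 2], [-1, 1]])
#     partsPool(region, 3)  # -> shape (1, 1, 3), all entries set to 1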
#def trainPOP():
if pnet.parallel.main(__name__):
#X = np.load("testMay151.npy")
#X = np.load("_3_100*6*6_1000*1*1_Jun_16_danny.npy")
#X = np.load("Jun26Rot.npy")
#X = np.load("Jun29Rot.npy")
#X = np.load("sequential6*6.npy")
X = np.load("original6*6.npy")
model = X.item()
# get num of Parts
#numParts = model['layers'][0]['num_true_parts'] * model['layers'][0]['num_orientations']
numParts = model['layers'][1]['num_parts']
net = pnet.PartsNet.load_from_dict(model)
allLayer = net.layers
ims,labels = ag.io.load_mnist('training')
ORI = 8
trainingDataNum = 1000 * ORI
firstLayerShape = 6
from skimage import transform
allTrainData = []
angles = np.arange(0,360,360/ORI)
for i in range(1000):
allTrainData.append(np.asarray([transform.rotate(ims[i],angle,resize=False,mode = 'nearest') for angle in angles]))
allTrainData = np.asarray(allTrainData)
print(allTrainData.shape)
allTrainData = allTrainData.reshape((1000 * angles.shape[0],) + allTrainData.shape[2:])
print(allTrainData.shape)
#gr.images(allTrainData[:200],show=False,fileName = 'rotataion.png')
if 1:
#extractedFeature = allLayer[0].extract(allTrainData)[0]
#np.save('extractedFeatureJun29.npy',extractedFeature)
#extractedFeature = np.load('extractedFeatureJun29.npy')
partsPlot = np.zeros((numParts,firstLayerShape,firstLayerShape))
partsCodedNumber = np.zeros(numParts)
imgRegion= [[] for x in range(numParts)]
partsRegion = [[] for x in range(numParts)]
#print(extractedFeature.shape)
print(trainingDataNum)
secondLayerCodedNumber = 0
secondLayerShape = 12
frame = (secondLayerShape - firstLayerShape)/2
frame = int(frame)
totalRange = 29 - firstLayerShape
numSecondLayerParts = 20
#allLayer = np.load('firstLayerInformationJun29.npy')
allPartsLayer = np.load('exPartsJun29.npy')
#partsRegion = np.load('/var/tmp/partsRegionJun29.npy')
partsRegion = np.load('/var/tmp/partsRegionOriginalJun29.npy')
allPartsLayerImgNumber = np.zeros((numParts,numSecondLayerParts))
allPartsLayerMeanImg = np.zeros((numParts,numSecondLayerParts,secondLayerShape,secondLayerShape))
allPartsLayerImg = [[[] for j in range(numSecondLayerParts)] for i in range(numParts)]
#imgRegion = np.load('/var/tmp/imgRegionJun29.npy')
imgRegion = np.load('/var/tmp/imgRegionOriginalJun29.npy')
notTrain = 0;
for i in range(numParts):
if(allPartsLayer[i][0].trained == False):
notTrain+=1
continue
extractedFeaturePart = extract(partsRegion[i],allPartsLayer[i])[0]
for j in range(extractedFeaturePart.shape[0]):
if(extractedFeaturePart[j,0,0,0]!=-1):
partIndex = extractedFeaturePart[j,0,0,0]
#allPartsLayerImg[i][partIndex].append(allTrainData[imgRegion[i][j]])
#allPartsLayerMeanImg[i,partIndex,:]+=allTrainData[imgRegion[i][j]]
allPartsLayerImg[i][partIndex].append(imgRegion[i][j])
allPartsLayerMeanImg[i,partIndex,:]+=imgRegion[i][j]
allPartsLayerImgNumber[i,partIndex]+=1
print(allPartsLayerImgNumber)
newAllPartsLayerImg = [[[] for j in range(numSecondLayerParts)] for i in range(numParts)]
for i in range(numParts):
for j in range(numSecondLayerParts):
allPartsLayerMeanImg[i,j,:] = allPartsLayerMeanImg[i,j,:]/allPartsLayerImgNumber[i,j]
for k in range(len(allPartsLayerImg[i][j])):
newAllPartsLayerImg[i][j].append(np.asarray(allPartsLayerImg[i][j][k]))  # index by j; partIndex here would be a stale value from the loop above
import matplotlib.pylab as plot
import random
print("==================================================")
print(notTrain)
settings = {'interpolation':'nearest','cmap':plot.cm.gray,}
settings['vmin'] = 0
settings['vmax'] = 1
secondLevelCurTestX = np.zeros((25, 29 - secondLayerShape, 29 - secondLayerShape,1,1,numParts))
secondLevelCurTestXCenter = np.zeros((25, 29 - secondLayerShape, 29 - secondLayerShape))
codeData = ims[0:25]
curTest = extract(codeData,allLayer[0:2])[0]#allLayer[0].extract(codeData)[0]
curTest = curTest.reshape(curTest.shape[0:3])
print(curTest.shape)
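# For every valid patch centre, pool the surrounding first-layer part codes
# into a binary occupancy grid with index_map_pooling and keep the code of the
# centre location; both are used below to query the second-layer parts models.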
for m in range(totalRange)[frame:totalRange - frame]:
for n in range(totalRange)[frame:totalRange - frame]:
print(m,n)
secondLevelCurTestX[:,m-frame,n-frame] = index_map_pooling(curTest[:,m-frame:m+frame+1,n-frame:n+frame+1],numParts,(2 * frame + 1,2*frame+1), (2*frame+1, 2* frame+1))
secondLevelCurTestXCenter[:,m-frame, n-frame] = curTest[:,m,n]
thirdLevelCurTestX = np.zeros((25,29 - secondLayerShape,29 - secondLayerShape))
plotData = np.ones(((10 + (2 + secondLayerShape) * (29 - secondLayerShape)) * 5 + 10,(10 + (2 + secondLayerShape) * (29 - secondLayerShape)) * 5 + 10)) * 0.8
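# Assemble a large mosaic: for each of the 25 test digits, every coded patch
# position is replaced by the mean image of the second-layer part it maps to.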
for p in range(5):
for q in range(5):
i = 5 * p + q
for m in range(29 - secondLayerShape):
for n in range(29 - secondLayerShape):
if(secondLevelCurTestXCenter[i,m,n]!=-1):
firstLevelPartIndex = int(secondLevelCurTestXCenter[i,m,n])
extractedFeaturePart = extract(np.array(secondLevelCurTestX[i,m,n][np.newaxis,:],dtype = np.uint8),allPartsLayer[firstLevelPartIndex])[0]
plotData[ (10 + (2 + secondLayerShape) * (29 - secondLayerShape)) * p + (2 + secondLayerShape) * m: (10 + (2 + secondLayerShape) * (29 - secondLayerShape)) * p + (2 + secondLayerShape) * m + 12, (10 + (2 + secondLayerShape) * (29 - secondLayerShape)) * q + (2 + secondLayerShape) * n:(10 + (2 + secondLayerShape) * (29 - secondLayerShape)) * q + (2 + secondLayerShape) * n + 12] = allPartsLayerMeanImg[firstLevelPartIndex,extractedFeaturePart]
plot.figure(figsize=(10,40))
plot.axis('off')
plot.imshow(plotData,**settings)
plot.savefig('visualizePatchesCodedtoExParts.pdf',format='pdf',dpi = 900)
|
bsd-3-clause
|
leonro/magpy-git
|
magpy/mpplot.py
|
1
|
110747
|
'''
Path: magpy.mpplot
Part of package: stream (plot)
Type: Library of matplotlib plotting functions
PURPOSE:
This script provides multiple functions for plotting a stream as well
as analysing various properties of a stream.
All plots are done with python's matplotlib package.
CONTAINS:
(MAIN...)
plot: (Func) Will plot variables from a single stream.
plotStreams: (Func) Plots multiple variables from multiple streams.
ploteasy: (Func) Quick & easy plotting function that plots all data.
(EXTENDED...)
plotFlag: (Func) Enables flagging in plot
plotEMD: (Func) Plots Empirical Mode Decomposition from magpy.opt.emd
plotNormStreams:(Func) Plot normalised streams
plotPS: (Func) Plots the power spectrum of a given key.
plotSatMag: (Func) Useful tool for plotting magnetic and satellite data.
plotSpectrogram:(Func) Plots spectrogram of a given key.
plotStereoplot: (Func) Plots stereoplot of inc and dec values.
obspySpectrogram:(Func) Spectrogram plotting function taken from ObsPy.
(HELPER/INTERNAL FUNCTIONS...)
_plot: (Func) ... internal function to funnel plot information
into a matplotlib plot object.
_confinex: (Func) ... utility function of _plot.
maskNAN: (Func) ... utility function of _plot.
nan_helper: (Func) ... utility function of _plot.
denormalize: (Func) ... utility function of _plot.
DEPENDENCIES:
magpy.stream
magpy.opt.emd
matplotlib
CALLED BY:
External data plotting and analysis scripts only.
'''
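# Quick-start sketch (illustrative only; the file name and keys below are
# assumptions, not part of this module):
#   from magpy.stream import read
#   import magpy.mpplot as mp
#   st = read('example.min')
#   mp.ploteasy(st)                                  # plot everything with defaults
#   mp.plot(st, ['x','y','z'], outfile='xyz.png')    # plot selected keys to file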
from __future__ import print_function
from __future__ import absolute_import
from magpy.stream import *
'''
try:
import matplotlib
if not os.isatty(sys.stdout.fileno()): # checks if stdout is connected to a terminal (if not, cron is starting the job)
print "No terminal connected - assuming cron job and using Agg for matplotlib"
matplotlib.use('Agg') # For using cron
except:
print "Prob with matplotlib"
try:
version = matplotlib.__version__.replace('svn', '')
try:
version = map(int, version.replace("rc","").split("."))
MATPLOTLIB_VERSION = version
except:
version = version.strip("rc")
MATPLOTLIB_VERSION = version
print "Loaded Matplotlib - Version %s" % str(MATPLOTLIB_VERSION)
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib import mlab
from matplotlib.dates import date2num, num2date
import matplotlib.cm as cm
from pylab import *
from datetime import datetime, timedelta
except ImportError as e:
loggerplot.error("plot package: Matplotlib import error. If missing, please install to proceed.")
loggerplot.error("Error string: %s" % e)
raise Exception("CRITICAL IMPORT ERROR FOR PLOT PACKAGE: Matplotlib import error.")
'''
# TODO:
# - Move all plotting functions over from stream.
# STILL TO FIX:
# spectrogram()
# TODO: ORIGINAL FUNCTION HAS ERRORS.
# renamed to plotSpectrogram
# changed variable title to plottitle
# obspyspectrogram()
# renamed to obspySpectrogram.
# DONE:
# plot() + plotStreams()
# powerspectrum()
# renamed to plotPS
# changed variable title to plottitle
# stereoplot()
# renamed to plotStereoplot
#
# KNOWN BUGS:
# - None? :)
colorlist = ['b','g','m','c','y','k','b','g','m','c','y','k']
symbollist = ['-','-','-','-','-','-','-','-','-','-','-','-']
gridcolor = '#316931'
labelcolor = '0.2'
def ploteasy(stream):
'''
DEFINITION:
Plots all data in stream. That's it.
This function has no formatting options whatsoever.
Very useful for quick & easy data evaluation.
PARAMETERS:
Variables:
- stream: (DataStream object) Stream to plot
RETURNS:
- plot: (Pyplot plot) Returns plot as plt.show()
EXAMPLE:
>>> ploteasy(somedata)
'''
keys = stream._get_key_headers(numerical=True)
if len(keys) > 9:
keys = keys[:8]
try:
sensorid = stream.header['SensorID']
except:
sensorid = ''
try:
datadate = datetime.strftime(num2date(stream[0].time),'%Y-%m-%d')
except:
datadate = datetime.strftime(num2date(stream.ndarray[0][0]),'%Y-%m-%d')
plottitle = "%s (%s)" % (sensorid,datadate)
print("Plotting keys:", keys)
plot_new(stream, keys,
confinex = True,
plottitle = plottitle)
#####################################################################
# #
# MAIN PLOTTING FUNCTIONS #
# (for plotting geomagnetic data) #
# #
#####################################################################
def plot_new(stream,variables=[],specialdict={},errorbars=False,padding=0,noshow=False,
annotate=False,stormphases=False,colorlist=colorlist,symbollist=symbollist,
t_stormphases=None,includeid=False,function=None,plottype='discontinuous',resolution=None,
**kwargs):
plot(stream,variables=variables,specialdict=specialdict,errorbars=errorbars,padding=padding,
noshow=noshow,annotate=annotate,stormphases=stormphases,colorlist=colorlist,
symbollist=symbollist,t_stormphases=t_stormphases,includeid=includeid,
function=function,plottype=plottype,resolution=resolution, **kwargs)
def plot(stream,variables=[],specialdict={},errorbars=False,padding=0,noshow=False,
annotate=False,stormphases=False,colorlist=colorlist,symbollist=symbollist,
t_stormphases=None,includeid=False,function=None,plottype='discontinuous',resolution=None,
**kwargs):
'''
DEFINITION:
This function creates a graph from a single stream.
PARAMETERS:
Variables:
- stream: (DataStream object) Stream to plot
- variables: (list) List of variables to plot.
Kwargs:
- annotate: (bool/list=False) If True, will annotate plot.
- bartrange: (float) Variable for plotting of bars.
- bgcolor: (color='white') Background colour of plot.
- colorlist: (list(colors)) List of colours to plot with.
Default = ['b','g','m','c','y','k','b','g','m','c','y','k']
- confinex: (bool=False) x-axis will be confined to smaller t-values if True.
- errorbars: (bool/list=False) If True, will plot corresponding errorbars:
[ [False], [True], [False, False] ]
- fill: (list = []) List of keys for which the plot uses fill_between
- fmt: (str) Format of outfile.
- fullday: (bool=False) Will plot fullday if True.
- function: (func) [0] is a dictionary containing keys (e.g. fx),
[1] the startvalue, [2] the endvalue
Plot the content of function within the plot.
CAUTION: Not yet implemented.
- grid: (bool=True) If True, will plot grid.
- gridcolor: (color='#316931') Colour of grid.
- includeid: (bool) If True, sensor IDs will be extracted from header data and
plotted alongside corresponding data. Default=False
- labelcolor: (color='0.2') Colour of labels.
- outfile: (str) Path of file to plot figure to.
- padding: (float/list) Float or list of padding for each variable.
- plottitle: (str) Title to put at top of plot.
- plottype: (NumPy str='discontinuous') Can also be 'continuous'.
- savedpi: (float=80) Determines dpi of outfile.
- specialdict: (dictionary) contains special information for specific plots.
key corresponds to the column
input is a list with the following parameters
{'x':[ymin,ymax]}
- stormphases: (bool/list) If True, will plot shaded and annotated storm phases.
NOTE: Also requires variable t_stormphases.
- symbollist: (list) List of symbols to plot with. Default= '-' for all.
- t_stormphases:(dict) Dictionary (2 <= len(dict) <= 4) containing datetime objects.
dict('ssc') = time of SSC
dict('mphase') = time of start of main phase / end of SSC
dict('rphase') = time of start of recovery phase / end of main phase
dict('stormend') = end of recovery phase
RETURNS:
- plot: (Pyplot plot) Returns plot as plt.show or savedfile
if outfile is specified.
EXAMPLE:
>>>
APPLICATION:
'''
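# Example call (a sketch; the stream variable and keys are assumptions):
#   st = read('mydata.min')
#   plot(st, ['x','y','z'], plottitle='Variometer data', outfile='xyz.png')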
# Test whether columns really contain data or whether only nans are present:
#print stream.ndarray
stream = stream._remove_nancolumns()
availablekeys = stream._get_key_headers(numerical=True)
#print stream.ndarray
# if no variables are given, use all available:
if len(variables) < 1:
variables = availablekeys
else:
variables = [var for var in variables if var in availablekeys]
if len(variables) > 9:
print("More than 9 variables available - plotting only the first nine:", end=' ')
print("Available:", variables)
variables = variables[:9]
print("Plotting:", variables)
else:
print("Plotting:", variables)
# Check lists for variables have correct length:
num_of_var = len(variables)
if num_of_var > 9:
loggerplot.error("plot: Can't plot more than 9 variables, sorry.")
raise Exception("Can't plot more than 9 variables!")
if len(symbollist) < num_of_var:
loggerplot.error("plot: Length of symbol list does not match number of variables.")
raise Exception("Length of symbol list does not match number of variables.")
if len(colorlist) < num_of_var:
loggerplot.error("plot: Length of color list does not match number of variables.")
raise Exception("Length of color list does not match number of variables.")
plot_dict = []
count = 0
# The following four variables can be given in two ways:
# bool: annotate = True --> all plots will be annotated
# list: annotate = [False, True, False] --> only second plot will be annotated
# If a list is given it is wrapped once more, since plotStreams expects one
# list per stream; booleans are passed through unchanged.
if type(errorbars) == list:
errorbars = [errorbars]
if type(stormphases) == list:
stormphases = [stormphases]
if type(annotate) == list:
annotate = [annotate]
if type(padding) == list:
padding = [padding]
plotStreams([stream], [ variables ], specialdict=[specialdict],noshow=noshow,
errorbars=errorbars,padding=padding,annotate=annotate,stormphases=stormphases,
colorlist=colorlist,symbollist=symbollist,t_stormphases=t_stormphases,
includeid=includeid,function=function,plottype=plottype,resolution=resolution,**kwargs)
def plotStreams(streamlist,variables,padding=None,specialdict={},errorbars=None,
colorlist=colorlist,symbollist=symbollist,annotate=None,stormphases=None,
t_stormphases={},includeid=False,function=None,plottype='discontinuous',
noshow=False,labels=False,resolution=None,**kwargs):
'''
DEFINITION:
This function plots multiple streams in one plot for easy comparison.
PARAMETERS:
Variables:
- streamlist: (list) A list containing the streams to be plotted
in a list, e.g.:
[ stream1, stream2, etc...]
[ fge, pos1, env1 ]
- variables: (list(list)) List containing the variables to be plotted
from each stream, e.g:
[ ['x'], ['f'], ['t1', 't2'] ]
Args:
LISTED VARIABLES:
(NOTE: All listed variables must correspond in size to the variable list.)
- annotate: (bool/list(bool)) If True, will annotate plot with flags, e.g.:
[ [True], [True], [False, False] ]
(Enter annotate = True for all plots to be annotated.)
- errorbars: (bool/list(bool)) If True, will plot corresponding errorbars:
[ [False], [True], [False, False] ]
(Enter errorbars = True to plot error bars on all plots.)
- labels: [ (str) ] List of labels for each stream and variable, e.g.:
[ ['FGE'], ['POS-1'], ['ENV-T1', 'ENV-T2'] ]
- padding: (float/list(list)) List of lists containing paddings for each
respective variable, e.g:
[ [5], [5], [0.1, 0.2] ]
(Enter padding = 5 for all plots to use 5 as padding.)
- stormphases: (bool/list(bool)) If True, will plot shaded and annotated storm phases.
(Enter stormphases = True to plot storm on all plots.)
NOTE: Also requires variable t_stormphases.
- specialdict: (list(dict)) Same as plot variable, e.g:
[ {'z': [100,150]}, {}, {'t1':[7,8]} ]
NORMAL VARIABLES:
- bartrange: (float) Variable for plotting of bars.
- bgcolor: (color='white') Background colour of plot.
- colorlist: (list(colors)) List of colours to plot with.
Default = ['b','g','m','c','y','k','b','g','m','c','y','k']
- confinex: (bool=False) x-axis will be confined to smaller t-values if True.
- fmt: (str) Format of outfile.
- fullday: (bool=False) Will plot fullday if True.
- function: (func) [0] is a dictionary containing keys (e.g. fx),
[1] the startvalue, [2] the endvalue
Plot the content of function within the plot.
CAUTION: Not yet implemented.
- grid: (bool=True) If True, will plot grid.
- gridcolor: (color='#316931') Colour of grid.
- includeid: (bool) If True, sensor IDs will be extracted from header data and
plotted alongside corresponding data. Default=False
- labelcolor: (color='0.2') Colour of labels.
- opacity: (0.0 to 1.0) Opacity applied to fills and bars.
- legendposition: (str) Position of legend (when var labels is used), e.g. 'upper left'
- noshow: (bool) If True, figure object will be returned. Default=False
- outfile: (str) Path of file to plot figure to.
- plottitle: (str) Title to put at top of plot.
- plottype: (NumPy str='discontinuous') Can also be 'continuous'.
- savedpi: (float=80) Determines dpi of outfile.
- symbollist: (list) List of symbols to plot with. Default= '-' for all.
- resolution: (int) Resolution of plot. Amount of points are reduced to this value.
- t_stormphases:(dict) Dictionary (2 <= len(dict) <= 4) containing datetime objects.
dict('ssc') = time of SSC
dict('mphase') = time of start of main phase / end of SSC
dict('rphase') = time of start of recovery phase / end of main phase
dict('stormend') = end of recovery phase
WARNING: If recovery phase is defined as past the end of
the data to plot, it will be plotted in addition to the
actual data.
RETURNS:
- plot: (Pyplot plot) Returns plot as plt.show or saved file
if outfile is specified.
EXAMPLE:
>>> plotStreams(streamlist, variables, padding=5, outfile='plots.png')
APPLICATION:
fge_file = fge_id + '_' + date + '.cdf'
pos_file = pos_id + '_' + date + '.bin'
lemi025_file = lemi025_id + '_' + date + '.bin'
cs_file = cs_id + '_' + date + '.bin'
fge = read(fge_file)
pos = read(pos_file)
lemi025 = read(lemi025_file,tenHz=True)
cs = read(cs_file)
streamlist = [ fge, cs, lemi025, pos ]
variables = [ ['x','y','z'], ['f'], ['z'], ['f'] ]
specialdict = [ {}, {'f':[48413,48414]}, {}, {} ]
errorbars = [ [False,False,False], [False], [False], [True] ]
padding = [ [1,1,1], [1], [1] , [1] ]
annotate = [ [False,False,False], [True], [True] , [True] ]
# TO PLOT FOUR DIFFERENT STREAMS WITH 7 VARIABLES TO A FILE:
plotStreams(streamlist, variables, padding=padding,specialdict=specialdict,
annotate=annotate,includeid=True,errorbars=errorbars,
outfile='plots/all_magn_cut1.png',
plottitle="WIC: All Magnetometers (%s)" % date)
# TO PLOT DATA AND RETURN A FIGURE FOR FURTHER:
plot = plotStreams(streamlist, variables, noshow=True)
plot.title("New title.")
plot.savefig("newfig.png")
'''
num_of_var = 0
for item in variables:
num_of_var += len(item)
if num_of_var > 9:
loggerplot.error("plotStreams: Can't plot more than 9 variables, sorry.")
raise Exception("Can't plot more than 9 variables!")
# Check lists for variables have correct length:
if len(symbollist) < num_of_var:
loggerplot.error("plotStreams: Length of symbol list does not match number of variables.")
raise Exception("Length of symbol list does not match number of variables.")
if len(colorlist) < num_of_var:
loggerplot.error("plotStreams: Length of color list does not match number of variables.")
raise Exception("Length of color list does not match number of variables.")
plot_dict = []
count = 0
if not resolution:
resolution = 5000000 # ~40 days of 1-second data (or ~5 days of 10 Hz data) can be shown in full detail
# Iterate through each variable, create dict for each:
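# Each variable ends up as a data_dict carrying at least 'key', 'tdata',
# 'ydata', 'color', 'symbol', 'ymin', 'ymax' and 'ylabel'; the collected
# dictionaries in plot_dict are handed to the internal _plot() routine.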
for i in range(len(streamlist)):
stream = streamlist[i]
ndtype = False
try:
t = stream.ndarray[KEYLIST.index('time')]
lentime = len(t)
if not lentime > 0:
x=1/0   # deliberately raise an exception so the except branch below falls back to line-structure handling
if lentime > resolution:
loggerstream.info("plot: Reducing data resultion ...")
stepwidth = int(len(t)/resolution)
t = t[::stepwidth]
# Redetermine lentime
lentime = len(t)
loggerstream.info("plot: Start plotting of stream with length %i" % len(stream.ndarray[0]))
ndtype = True
except:
t = np.asarray([row[0] for row in stream])
loggerstream.info("plot: Start plotting of stream with length %i" % len(stream))
#t = np.asarray([row[0] for row in stream])
for j in range(len(variables[i])):
data_dict = {}
key = variables[i][j]
loggerplot.info("plotStreams: Determining plot properties for key %s." % key)
if not key in NUMKEYLIST:
loggerplot.error("plot: Column key (%s) not valid!" % key)
raise Exception("Column key (%s) not valid!" % key)
ind = KEYLIST.index(key)
try:
y = stream.ndarray[ind]
if not len(y) > 0:
x=1/0
if len(y) > resolution:
stepwidth = int(len(y)/resolution)
y = y[::stepwidth]
if len(y) != lentime:
loggerplot.error("plotStreams: Dimensions of time and %s do not match!" % key)
raise Exception("Dimensions of time and %s do not match!")
except:
y = np.asarray([float(row[ind]) for row in stream])
#y = np.asarray([row[ind] for row in stream])
if len(y) == 0:
loggerplot.error("plotStreams: Cannot plot stream of zero length!")
# eventually remove flagged:
dropflagged = False
if dropflagged:
flagind = KEYLIST.index('flag')
flags = stream.ndarray[flagind]
ind = KEYLIST.index(key)
flagarray = np.asarray([list(el)[ind] for el in flags])
print("Flagarray", flagarray)
indicies = np.where(flagarray == '1')
print("Indicies", indicies)
#for index in indicies:
# y[index] = NaN
#y[index] = float('nan')
#newflag = flags[0][ind]
#newflag[indexflag] = '0'
#data[i]['flags'][0][ind] == newflag
#y = np.delete(np.asarray(y),indicies)
#print len(t), len(y), np.asarray(y)
# Fix if NaNs are present:
if plottype == 'discontinuous':
y = maskNAN(y)
else:
nans, test = nan_helper(y)
newt = [t[idx] for idx, el in enumerate(y) if not nans[idx]]
t = newt
y = [el for idx, el in enumerate(y) if not nans[idx]]
#print len(t), len(y), np.asarray(y), np.asarray(t)
if len(y) == 0:
loggerplot.error("plotStreams: Cannot plot stream without data - Filling with 9999999!")
if len(stream.ndarray[0]) > 0:
y = np.asarray([9999999 for row in stream.ndarray[0]])
else:
y = np.asarray([9999999 for row in stream])
data_dict['key'] = key
data_dict['tdata'] = t
data_dict['ydata'] = y
data_dict['color'] = colorlist[count]
data_dict['symbol'] = symbollist[count]
# Define padding for each variable:
if padding:
if type(padding) == list:
ypadding = padding[i][j]
else:
ypadding = padding
else:
ypadding = (np.max(y)- np.min(y))*0.05 # 0
# If limits are specified, use these:
if specialdict:
if key in specialdict[i]:
specialparams = specialdict[i][key]
data_dict['ymin'] = specialparams[0] - ypadding
data_dict['ymax'] = specialparams[1] + ypadding
else:
if not (np.min(y) == np.max(y)):
data_dict['ymin'] = np.min(y) - ypadding
data_dict['ymax'] = np.max(y) + ypadding
else:
loggerplot.warning('plot: Min and max of key %s are equal. Adjusting axes.' % key)
data_dict['ymin'] = np.min(y) - 0.05
data_dict['ymax'] = np.max(y) + 0.05
else:
if not (np.min(y) == np.max(y)):
data_dict['ymin'] = np.min(y) - ypadding
data_dict['ymax'] = np.max(y) + ypadding
else:
loggerplot.warning('plot: Min and max of key %s are equal. Adjusting axes.' % key)
data_dict['ymin'] = np.min(y) - 0.5
data_dict['ymax'] = np.max(y) + 0.5
# Define y-labels:
try:
ylabel = stream.header['col-'+key].upper()
except:
ylabel = ''
pass
try:
yunit = stream.header['unit-col-'+key]
except:
yunit = ''
pass
if yunit == None:
# check None first - otherwise re.sub below would fail on a missing unit
loggerplot.warning("No units for key %s! Empty column?" % key)
label = ylabel
elif not yunit == '':
yunit = re.sub('[#$%&~_^\{}]', '', yunit)
label = ylabel+' $['+yunit+']$'
else:
label = ylabel
data_dict['ylabel'] = label
# Create array for errorbars:
if errorbars:
if type(errorbars) == list:
if errorbars[i][j]:
ind = KEYLIST.index('d'+key)
if ndtype:
errors = stream.ndarray[ind]
else:
errors = np.asarray([row[ind] for row in stream])
if len(errors) > 0:
data_dict['errors'] = errors
else:
loggerplot.warning("plot: No errors for key %s. Leaving empty." % key)
else:
ind = KEYLIST.index('d'+key)
if ndtype:
errors = stream.ndarray[ind]
else:
errors = np.asarray([row[ind] for row in stream])
if len(errors) > 0:
data_dict['errors'] = errors
else:
loggerplot.warning("plot: No errors for key %s. Leaving empty." % key)
# Annotate flagged data points:
if annotate:
if type(annotate) == list:
if annotate[i][j]:
if ndtype:
ind = KEYLIST.index('flag')
flag = stream.ndarray[ind]
ind = KEYLIST.index('comment')
comments = stream.ndarray[ind]
else:
flag = stream._get_column('flag')
comments = stream._get_column('comment')
flags = array([flag,comments], dtype=object)
data_dict['annotate'] = True
data_dict['flags'] = flags
else:
data_dict['annotate'] = False
else:
if ndtype:
ind = KEYLIST.index('flag')
flag = stream.ndarray[ind]
ind = KEYLIST.index('comment')
comments = stream.ndarray[ind]
else:
flag = stream._get_column('flag')
comments = stream._get_column('comment')
flags = array([flag,comments], dtype=object)
#print "plotStreams1", flags
data_dict['annotate'] = True
data_dict['flags'] = flags
else:
data_dict['annotate'] = False
#print "plotStreams2", data_dict['flags']
# Plot a function:
if function:
data_dict['function'] = function
# Plot shaded storm phases:
if stormphases:
if not t_stormphases:
loggerplot.error("plotStreams: No variable t_stormphases for plotting phases.")
raise Exception("Require variable t_stormphases when stormphases=True!")
if len(t_stormphases) not in [1,2,3,4]:
loggerplot.error("plotStreams: Length of variable t_stormphases incorrect.")
raise Exception("Something is wrong with length of variable t_stormphases!")
if type(stormphases) == list:
if stormphases[i][j]:
data_dict['stormphases'] = t_stormphases
else:
data_dict['stormphases'] = t_stormphases
# Add labels:
if labels:
data_dict['datalabel'] = labels[i][j]
else:
data_dict['datalabel'] = ''
# Include sensor IDs:
if includeid:
try:
sensor_id = stream.header['SensorID']
data_dict['sensorid'] = sensor_id
except:
loggerplot.warning("plotStreams: No sensor ID to put into plot!")
plot_dict.append(data_dict)
count += 1
loggerplot.info("plotStreams: Starting plotting function...")
if not noshow:
_plot(plot_dict, **kwargs)
loggerplot.info("plotStreams: Plotting completed.")
else:
fig = _plot(plot_dict, noshow=True, **kwargs)
loggerplot.info("plotStreams: Plotting completed.")
return fig
#####################################################################
# #
# EXTENDED PLOTTING FUNCTIONS #
# (for more advanced functions) #
# #
#####################################################################
#####################################################################
# Flagging #
#####################################################################
def toggle_selector(event):
print (' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print(' RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print(' RectangleSelector activated.')
toggle_selector.RS.set_active(True)
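# figFlagger: interactive helper used by plotFlag(). It draws the stream with
# plotStreams(), attaches a RectangleSelector and keyboard shortcuts to the
# axes, and collects the indices of the selected points in self.idxarray so
# that they can be flagged afterwards.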
class figFlagger():
def __init__(self, data = None, variables=None, figure=False):
self.data = data
self.offset = False
self.mainnum = 1
self.flagid = 3
self.reason = 'why because'
self.idxarray = []
self.figure = False
self.axes = False
self.orgkeylist = self.data._get_key_headers()
if not variables: #or variables == ['x','y','z','f'] or variables == ['x','y','z']:
try:
self.data = self.analyzeData(self.orgkeylist)
except:
print("plotFlag: You have to provide variables for this data set")
keylist = self.data._get_key_headers(numerical=True)
#print keylist
if variables:
keylist = variables
if len(keylist) > 9:
keylist = keylist[:8]
#print keylist
self.keylist = keylist
annotatelist = [True if elem in self.orgkeylist else False for elem in keylist] # if elem in ['x','y','z'] else False]
self.fig = plotStreams([self.data], [keylist], noshow=True, annotate=[annotatelist])
radio, hzfunc = self.startup(self.fig, self.data)
radio.on_clicked(hzfunc)
if figure:
self.figure = self.fig
self.axes = self.fig.axes
else:
plt.show()
def analyzeData(self,keylist):
#keylist = self.data._get_key_headers()
if not len(self.data.ndarray[0]) > 0:
print("No ndarrayfound:")
print(" -- stream will be converted to ndarray type")
self.data = self.data.linestruct2ndarray()
if 'x' in keylist and 'y' in keylist and 'z' in keylist:
self.data = self.data.differentiate(keys=['x','y','z'],put2keys = ['dx','dy','dz'])
if 'f' in keylist:
self.data = self.data.delta_f()
return self.data
def line_select_callback(self, eclick, erelease):
'eclick and erelease are the press and release events'
#print "Selected line---:",self.mainnum
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
#print "(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2)
#print " The button you used were: %s %s" % (eclick.button, erelease.button)
self.selarray = [x1, y1, x2, y2]
self.annotate(self.data, self.mainnum, self.selarray)
def toggle_selector(self, event):
#print (' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print(' RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print(' RectangleSelector activated.')
toggle_selector.RS.set_active(True)
if event.key in ['F', 'f']:
print(' Flag data:')
print(' ------------------------------------------')
print(" Selected data point:", len(self.idxarray))
plt.clf()
plt.close()
if event.key in ['2']:
print(' Setting default Flag ID to 2.')
print(' ------------------------------------------')
print(" -- keep data in any case - Observators decision")
self.flagid = 2
if event.key in ['3']:
print(' Setting default Flag ID to 3.')
print(' ------------------------------------------')
print(" -- not to be used for definite - Observators decision")
self.flagid = 3
if event.key in ['L', 'l']:
print(' Data:')
print(' ------------------------------------------')
print("Length:", data.length())
#stream.write("")
if event.key in ['O', 'o']:
print(' Apply offset:')
print(' ------------------------------------------')
print(" Selected data point:", len(self.idxarray))
self.offset = True
plt.clf()
plt.close()
if event.key in ['H', 'h']:
print(' Header:')
print(' ------------------------------------------')
print(self.data.header)
if event.key in ['c', 'C']:
print(' Close flagging and store data:')
print(' ------------------------------------------')
self.idxarray = 0
plt.clf()
plt.close('all')
def annotate(self, data, numb, selarray):
# Selecting the time range
#print "Dataarray", data.ndarray
selectedndarray = []
keyar = []
#print "Selected range:", selarray
minbound = min([selarray[1],selarray[3]])
maxbound = max([selarray[1],selarray[3]])
idxstart = np.abs(data.ndarray[0].astype(float)-min(selarray[0],selarray[2])).argmin()
idxend = np.abs(data.ndarray[0].astype(float)-max(selarray[0],selarray[2])).argmin()
for i in range(len(data.ndarray)):
if len(data.ndarray[i]) > idxstart: # and KEYLIST[i] in self.keylist:
if KEYLIST[i] in self.keylist or KEYLIST[i] == 'time': #i in range(len(FLAGKEYLIST)) and
keyar.append(KEYLIST[i])
timear = data.ndarray[i][idxstart:idxend].astype(float)
selectedndarray.append(timear)
selectedndarray = np.asarray(selectedndarray)
newselectedndarray = []
for i in range(len(selectedndarray)):
allar = [elem for idx, elem in enumerate(selectedndarray[i]) if selectedndarray[numb][idx] >= minbound and selectedndarray[numb][idx] <= maxbound ]
if i == 0:
self.idxar = [idx+idxstart for idx, elem in enumerate(selectedndarray[i]) if selectedndarray[numb][idx] >= minbound and selectedndarray[numb][idx] <= maxbound ]
newselectedndarray.append(allar)
newselectedndarray = np.asarray(newselectedndarray).astype(float)
self.idxar = np.asarray(self.idxar)
# Some cleanup
del selectedndarray
self.markpoints(newselectedndarray,keyar)
self.idxarray.extend(self.idxar)
print("Selected %d points to annotate:" % len(self.idxarray))
def markpoints(self, dataarray,keyarray):
for idx,elem in enumerate(dataarray):
key = keyarray[idx]
#print "Selected curve - markpoints:", idx
#print dataarray[idx]
if not idx == 0 and not len(elem) == 0 and key in self.keylist: #FLAGKEYLIST:
#print ( idx, self.axlist[idx-1] )
ax = self.axlist[idx-1]
#ax.clear()
#ax.text(dataarray[0][1],dataarray[1][1], "(%s, %3.2f)"%("Hello",3.67), )
ax.scatter(dataarray[0].astype('<f8'),elem.astype('<f8'), c='r', zorder=100) #, marker='d', c='r') #, zorder=100)
#plt.connect('key_press_event', toggle_selector)
plt.draw()
def hzfunc(self,label):
ax = self.hzdict[label]
num = int(label.replace("plot ",""))
#print "Selected axis number:", num
#global mainnum
self.mainnum = num
# drawtype is 'box' or 'line' or 'none'
toggle_selector.RS = RectangleSelector(ax, self.line_select_callback,
drawtype='box', useblit=True,
button=[1,3], # don't use middle button
minspanx=5, minspany=5,
spancoords='pixels',
rectprops = dict(facecolor='red', edgecolor = 'black', alpha=0.2, fill=True))
#plt.connect('key_press_event', toggle_selector)
plt.draw()
def flag(self, idxarray , flagid, reason, keylist):
print("Flagging components %s with flagid %d, because of %s" % (','.join(keylist), flagid, reason))
self.data = self.data.flagfast(idxarray, flagid, reason, keylist)
def startup(self, fig, data):
print("--------------------------------------------")
print(" you started the build-in flagging function")
print("--------------------------------------------")
print(" -- use mouse to select rectangular areas")
print(" -- press f for flagging this region")
print(" -- press 2,3 to change default flag ID")
print(" -- press l to get some basic data info")
print(" -- press o to apply an offset")
print(" -- press h to get all meta information")
print(" -- press c to close the window and allow saving")
# Arrays to exchange data
self.selarray = []
# Globals
self.idxar = [] # holds all selected index values
#mainnum = 1 # holds the selected figure axis
self.axlist = fig.axes
# #############################################################
## Adding Radiobttons to switch selector between different plot
# #############################################################
plt.subplots_adjust(left=0.2)
axcolor = 'lightgoldenrodyellow'
rax = plt.axes([0.02, 0.8, 0.10, 0.15], axisbg=axcolor)
# create dict and list
numlst = ['plot '+str(idx+1) for idx,elem in enumerate(self.axlist)]
## python 2.7 and higher
# self.hzdict = {'plot '+str(idx+1):elem for idx,elem in enumerate(self.axlist)}
## python 2.6 and lower
self.hzdict = dict(('plot '+str(idx+1),elem) for idx,elem in enumerate(self.axlist))
radio = RadioButtons(rax, numlst)
# #############################################################
## Getting a rectangular selector
# #############################################################
toggle_selector.RS = RectangleSelector(self.axlist[0], self.line_select_callback, drawtype='box', useblit=True,button=[1,3],minspanx=5, minspany=5,spancoords='pixels', rectprops = dict(facecolor='red', edgecolor = 'black', alpha=0.2, fill=True))
plt.connect('key_press_event', self.toggle_selector)
return radio, self.hzfunc
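# addFlag: console dialogue following a selection - it asks for flag ID, reason
# and keys, builds flaglist entries of the form
# [begintime, endtime, key, flagid, reason, sensorid, modtime] for each group
# of consecutive indices, applies the flags and reopens the flagging figure.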
def addFlag(data, flagger, indeciestobeflagged, variables):
# INPUT section
print("Provide flag ID (2 or 3):")
print(" -- 2: keep data")
print(" -- 3: remove data")
flagid = raw_input(" -- default: %d \n" % flagger.flagid)
print(flagid)
reason = raw_input("Provide reason: \n")
print(reason)
flagkeys = raw_input("Keys to flag: e.g. x,y,z\n")
if not flagkeys == '':
if ',' in flagkeys:
keylist = flagkeys.split(',')
keylist = [elem for elem in keylist if elem in KEYLIST]
else:
if flagkeys in KEYLIST:
keylist = [flagkeys]
else:
keylist = []
else:
keylist = []
# ANALYSIS section
try:
flagid = int(flagid)
except:
flagid = flagger.flagid
if not flagid in [0,1,2,3,4]:
flagid = flagger.flagid
if reason == '':
reason = flagger.reason
if keylist == []:
keylist = [key for key in flagger.orgkeylist if key in FLAGKEYLIST]
try:
sensid = data.header["SensorID"]
except:
print("plotFlag: Flagging requires SensorID - set with stream.header['SensorID'] = 'MyID'")
sensid = "Dummy_1234_0001"
flaglst = []
for k, g in groupby(enumerate(indeciestobeflagged), lambda ix: ix[0] - ix[1]):
consecutives = list(map(itemgetter(1), g))   # list() so that indexing below also works under Python 3
#print consecutives
begintime = num2date(data.ndarray[0][consecutives[0]].astype(float)).replace(tzinfo=None)
endtime = num2date(data.ndarray[0][consecutives[-1]].astype(float)).replace(tzinfo=None)
modtime = datetime.utcnow()
#if option o:
#datastream = datastream._select_timerange(begintime,endtime)
#datastream = datastream.offset({key:value})
#orgstream = orgstream.extend(datastream)
# or orgstream = datastream.extend(orgstream)
#orgstream = orgstream.removeduplicates() ##if only last occurence is used
for key in keylist:
#print begintime, endtime, key, flagid, reason, sensid, modtime
if not sensid == '':
flaglst.append([begintime, endtime, key, flagid, reason, sensid, modtime])
# now flag the data and restart figure
flagger.flag(indeciestobeflagged, flagid, reason, keylist)
# reduce to original keys
orgkeys = flagger.orgkeylist
data = data.selectkeys(orgkeys)
flagger = figFlagger(data, variables)
#flagger.flag(data)
return flagger.idxarray, flaglst
def plotFlag(data,variables=None,figure=False):
'''
DEFINITION:
Creates a plot for flagging.
Rectangular selection is possible and flagging can be conducted.
Several additional keys provide data info.
RETURNS:
- stream: (Datastream) ndarray stream to be saved
optional
- variables: (list of keys)
REQUIRES:
- class figFlagger
EXAMPLE:
>>> flaggedstream = plotFlag(stream)
'''
flaglist = []
flagdata = data.copy()
flagger = figFlagger(flagdata,variables,figure)
indeciestobeflagged = flagger.idxarray
while indeciestobeflagged > 0:
indeciestobeflagged, flaglst = addFlag(flagger.data, flagger, indeciestobeflagged, variables)
flaglist.extend(flaglst)
print("Returning data ....")
try:
print(" -- original format: %s " % data.header['DataFormat'])
except:
pass
orgkeys = flagger.orgkeylist
flagdata = flagger.data.selectkeys(orgkeys)
return flagdata, flaglist
#####################################################################
# End Flagging #
#####################################################################
def plotEMD(stream,key,verbose=False,plottitle=None,
outfile=None,sratio=0.25,max_modes=20,hht=True):
'''
DEFINITION:
NOTE: EXPERIMENTAL FUNCTION ONLY.
Function for plotting Empirical Mode Decomposition of
DataStream. Currently only optional function.
(Adapted from RL code in MagPyAnalysis/NoiseFloor_Spectral/magemd.py.)
PARAMETERS:
Variables:
- stream: (DataStream object) Description.
- key: (str) Key in stream to apply EMD to.
Kwargs:
- outfile: (str) Save plot to file. If no file defined, plot
will simply be shown.
- plottitle: (str) Title to place at top of plot.
- sratio: (float) Decomposition percentage. Determines how curve
is split. Default = 0.25.
- verbose: (bool) Print results. Default False.
RETURNS:
- plot: (matplotlib plot) Plot depicting the modes.
EXAMPLE:
>>> plotEMD(stream,'x')
APPLICATION:
'''
# TODO:
# - make axes easier to read
# - add an amplitude statistic (histogram)
# - add a frequency-of-occurrence (Haeufigkeit) plot perpendicular to the diagrams
import magpy.opt.emd as emd # XXX: add this into main program when method is finalised
loggerplot.info("plotEMD: Starting EMD calculation.")
col = stream._get_column(key)
timecol = stream._get_column('time')
if verbose:
print("Amount of values and standard deviation:", len(col), np.std(col))
res = emd.emd(col,max_modes=max_modes)
if verbose:
print("Found the following amount of decomposed modes:", len(res))
separate = int(np.round(len(res)*sratio,0))
if verbose:
print("Separating the last N curves as smooth. N =",separate)
stdarray = []
newcurve = [0]*len(res[0])
noisecurve = [0]*len(res[0])
midcurve = [0]*len(res[0])
smoothcurve = [0]*len(res[0])
f, axarr = plt.subplots(len(res), sharex=True)
for i, elem in enumerate(res):
axarr[i].plot(elem)
newcurve = [x + y for x, y in zip(newcurve, elem)]
stdarray.append([i,np.std(elem)])
ds = stream
ds._put_column(elem,'x')
ds._put_column(timecol,'time')
"""
if i >= len(res)-separate:
if verbose:
print "Smooth:", i
smoothcurve = [x + y for x, y in zip(smoothcurve, elem)]
if i < len(res)-separate:
if verbose:
print "Noise:", i
noisecurve = [x + y for x, y in zip(noisecurve, elem)]
"""
if i >= 15:
if verbose:
print("Smooth:", i)
smoothcurve = [x + y for x, y in zip(smoothcurve, elem)]
if 8 <= i < 14:
if verbose:
print("Mid:", i)
midcurve = [x + y for x, y in zip(midcurve, elem)]
if 2 < i < 8:
if verbose:
print("Noise:", i)
noisecurve = [x + y for x, y in zip(noisecurve, elem)]
plt.show()
plt.plot(smoothcurve)
#plt.plot(newcurve)
plt.title("Variation of IMF 14 to 17 component - low frequency content")
plt.xlabel("Time [15 min counts]")
plt.ylabel("Counts/min")
plt.legend()
plt.show()
plt.plot(noisecurve)
plt.title("Variation of IMF 1 to 8 component - high frequency content")
plt.xlabel("Time [15 min counts]")
plt.ylabel("Counts/min")
plt.show()
plt.plot(midcurve)
plt.title("Variation of IMF 9 to 12 - mid frequency content")
plt.xlabel("Time [15 min counts]")
plt.ylabel("Counts/min")
plt.show()
plt.close()
stdarray = np.asarray(stdarray)
ind = stdarray[:,0]
val = stdarray[:,1]
plt.bar(ind,val)
plt.title("Standard deviation of EMD modes")
plt.xlabel("EMD mode")
plt.ylabel("Standard deviation [nT]")
plt.show()
if hht:
print(emd.calc_inst_info(res,stream.samplingrate()))
def plotNormStreams(streamlist, key, normalize=True, normalizet=False,
normtime=None, bgcolor='white', colorlist=colorlist, noshow=False,
outfile=None, plottitle=None, grid=True, gridcolor=gridcolor,
labels=None, legendposition='upper right',labelcolor=labelcolor,
returndata=False,confinex=False,savedpi=80):
'''
DEFINITION:
Will plot normalised streams. Streams will be normalized to a general
median or to the stream values at a specific point in time.
Useful for directly comparing streams in different locations.
PARAMETERS:
Variables:
- streamlist: (list) A list containing the streams to be plotted.
e.g.:
[ stream1, stream2, etc...]
[ lemi1, lemi2, lemi3 ]
- key: (str) Variable to be compared
'f'
Args:
- bgcolor: (color='white') Background colour of plot.
- colorlist: (list(colors)) List of colours to plot with.
Default = ['b','g','m','c','y','k','b','g','m','c','y','k']
- grid: (bool=True) If True, will plot grid.
- gridcolor: (color='#316931') Colour of grid.
#- labelcolor: (color='0.2') Colour of labels.
- labels: (list) Insert labels and legend for each stream, e.g.:
['WIC', 'WIK', 'OOP']
- legendposition: (str) Position of legend. Default = "upper right"
- outfile: (str) Path of file to plot figure to.
- normalize: (bool) If True, variable will be normalized to 0. Default = True.
- normalizet: (bool) If True, time variable will be normalized to 0. Default = False
- normtime: (datetime object/str) If streams are to be normalized, normtime
is the time to use as a reference.
- noshow: (bool) Will return figure object at end if True, otherwise only plots
- plottitle: (str) Title to put at top of plot.
#- plottype: (NumPy str='discontinuous') Can also be 'continuous'.
- returndata: (bool) If True, will return normalised data arrays. Default = False.
#- savedpi: (float=80) Determines dpi of outfile.
RETURNS:
- plot: (Pyplot plot) Returns plot as plt.show or saved file
if outfile is specified.
EXAMPLE:
>>>
'''
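# Example call (a sketch; the stream variables and labels are assumptions):
#   fig = plotNormStreams([st_wic, st_wik], 'f', normalize=True,
#                         labels=['WIC','WIK'], noshow=True)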
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
arraylist = []
if labels:
if len(labels) != len(streamlist):
loggerplot.warning("plotNormStreams: Number of labels does not match number of streams!")
for i, stream in enumerate(streamlist):
loggerplot.info("plotNormStreams: Adding stream %s of %s..." % ((i+1),len(streamlist)))
y = stream._get_column(key)
t = stream._get_column('time')
xlabel = "Time (UTC)"
color = colorlist[i]
if len(y) == 0:
loggerplot.error("plotNormStreams: stream with empty array!")
return
try:
yunit = stream.header['unit-col-'+key]
except:
yunit = ''
#ylabel = stream.header['col-'+key].upper()+' $['+re.sub('[#$%&~_^\{}]', '', yunit)+']$'
ylabel = stream.header['col-'+key].upper()+' $['+re.sub('[#$%&~_\{}]', '', yunit)+']$'
# NORMALIZE VARIABLE:
if normalize:
if normtime:
if type(normtime) == list:
normtime_start, normtime_end = test_time(normtime[0]), test_time(normtime[1])
normarea = stream.trim(normtime_start,normtime_end,newway=True)
normvalue = normarea.mean(key,meanfunction='median')
else:
normtime = test_time(normtime)
val, idx = find_nearest(t,date2num(normtime))
normvalue = y[idx]
else:
normvalue = np.median(y)
y = y - normvalue
ylabel = "normalized "+ylabel
# NORMALIZE TIME:
if normalizet:
if normtime:
zerotime = normtime
else:
zerotime = t[0]
t = t - zerotime
xlabel = "normalized "+xlabel
if returndata:
arraylist.append([t,y])
# CONFINE X
timeunit = ''
if confinex:
tmin = np.min(t)
tmax = np.max(t)
# --> If dates to be confined, set value types:
_confinex(ax, tmax, tmin, timeunit)
ax.set_xlabel("Time (UTC) %s" % timeunit, color=labelcolor)
# PLOT DATA:
if labels:
ax.plot(t,y,color+'-',label=labels[i])
else:
ax.plot(t,y,color+'-')
# ADD GRID:
if grid:
ax.grid(True,color=gridcolor,linewidth=0.5)
# SET LABELS:
ax.set_xlabel(xlabel, color=labelcolor)
ax.set_ylabel(ylabel, color=labelcolor)
ax.set_title(plottitle)
ax.set_xlim([t[0],t[-1]])
# INSERT LEGEND:
if labels:
legend = ax.legend(loc=legendposition, shadow=True)
for label in legend.get_texts():
label.set_fontsize('small')
# FINALISE PLOT:
if noshow == True and returndata == True:
return [fig, arraylist]
elif noshow == False and returndata == True:
return arraylist
elif noshow == True and returndata == False:
return fig
else:
if outfile:
plt.savefig(outfile,dpi=savedpi)
else:
plt.show()
def plotPS(stream,key,debugmode=False,outfile=None,noshow=False,
returndata=False,freqlevel=None,marks={},fmt=None,
axes=None,plottitle=None,**kwargs):
"""
DEFINITION:
Calculate the power spectrum following the numpy fft example
and plot the results.
PARAMETERS:
Variables:
- stream: (DataStream object) Stream to analyse
- key: (str) Key to analyse
Kwargs:
- axes: (?) ?
- debugmode: (bool) Variable to show steps
- fmt: (str) Format of outfile, e.g. "png"
- freqlevel: (float) print noise level at that frequency.
- marks: (dict) Contains list of marks to add, e.g:
{'here',1}
- outfile: (str) Filename to save plot to
- plottitle: (str) Title to display on plot
- returndata: (bool) Return frequency and asd
RETURNS:
- plot: (matplotlib plot) A plot of the powerspectrum
If returndata = True:
- freqm: (ndarray) Frequencies of the spectrum
- asdm: (ndarray) Amplitude spectral density values
EXAMPLE:
>>> plotPS(stream, 'x')
OR
>>> freq, a = plotPS(stream, 'x', returndata=True)
APPLICATION:
>>> import magpy
1. Requires DataStream object:
>>> data_path = '/usr/lib/python2.7/magpy/examples/*'
>>> data = read(path_or_url=data_path,
starttime='2013-06-10 00:00:00',
endtime='2013-06-11 00:00:00')
2. Call for data stream:
>>> data.powerspectrum('f',
plottitle='PSD of f', marks={'day':0.000011574},
outfile='ps.png')
"""
loggerplot.info("plotPS: Starting powerspectrum calculation.")
if noshow:
show = False
else:
show = True
dt = stream.get_sampling_period()*24*3600
if not stream.length()[0] > 0:
loggerplot.error("plotPS: Stream of zero length -- aborting.")
raise Exception("Can't analyse power spectrum of stream of zero length!")
if len(stream.ndarray[0]) > 0:
pos = KEYLIST.index(key)
t = stream.ndarray[0]
val = stream.ndarray[pos]
else:
t = np.asarray(stream._get_column('time'))
val = np.asarray(stream._get_column(key))
t_min = np.min(t)
t_new, val_new = [],[]
nfft = int(nearestPow2(len(t)))
if nfft > len(t):
nfft = int(nearestPow2(len(t) / 2.0))
for idx, elem in enumerate(val):
if not isnan(elem):
t_new.append((t[idx]-t_min)*24*3600)
val_new.append(elem)
t_new = np.asarray(t_new)
val_new = np.asarray(val_new)
if debugmode:
print("Extracted data for powerspectrum at %s" % datetime.utcnow())
if not axes:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes
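# Power spectral density from matplotlib's mlab.psd; the quantity plotted
# below is the amplitude spectral density, i.e. sqrt(PSD).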
psdm = mlab.psd(val_new, nfft, 1/dt)
asdm = np.sqrt(psdm[0])
freqm = psdm[1]
ax.loglog(freqm, asdm,'b-')
if debugmode:
print("Maximum frequency:", max(freqm))
if freqlevel:
val, idx = find_nearest(freqm, freqlevel)
if debugmode:
print("Maximum Noise Level at %s Hz: %s" % (val,asdm[idx]))
if not marks:
pass
else:
for elem in marks:
ax.annotate(elem, xy=(marks[elem],min(asdm)),
xytext=(marks[elem],max(asdm)-(max(asdm)-min(asdm))*0.3),
bbox=dict(boxstyle="round", fc="0.95", alpha=0.6),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=1,
connectionstyle="angle,angleA=0,angleB=90,rad=10"))
try:
unit = stream.header['unit-col-'+key]
except:
unit = 'unit'
ax.set_xlabel('Frequency $[Hz]$')
ax.set_ylabel(('Amplitude spectral density $[%s/sqrt(Hz)]$') % unit)
if plottitle:
ax.set_title(plottitle)
loggerplot.info("Finished powerspectrum.")
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif returndata:
return freqm, asdm
elif show:
plt.draw() # show() should only ever be called once. Use draw() in between!
else:
return fig
def plotSatMag(mag_stream,sat_stream,keys,outfile=None,plottype='discontinuous',
padding=5,plotfunc=True,confinex=True,labelcolor=labelcolor,savedpi=80,
plotcolors=['#000066', '#C0C0C0'], plottitle=None,legend=True,
legendlabels=['Magnetic data','Satellite data'], grid=True,specialdict={},
annotate=False,returnfig=False):
"""
DEFINITION:
Plot satellite and magnetic data on same plot for storm comparison.
Currently only plots 1x mag variable vs. 1x sat variable.
PARAMETERS:
Variables:
- mag_stream: (DataStream object) Stream of magnetic data
- sat_stream: (DataStream object) Stream of satellite data
- keys: (list) Keys to analyse [mag_key,sat_key], e.g. ['x','y']
Kwargs:
- annotate: (bool) If True, comments in flagged stream lines will be annotated into plot.
- confinex: (bool) If True, time strs on y-axis will be confined depending on scale.
- grid: (bool) If True, grid will be added to plot. (Doesn't work yet!)
- legend: (bool) If True, legend will be added to plot. Default in legendlabels.
- legendlabels: (list[str]) List of labels to plot in legend.
- outfile: (str) Filepath to save plot to.
- padding: (float) Padding to add to plotted variables
- plotcolors: (list) List of colors for (0) mag data and (1) sat data lines
- plotfunc: (bool) If True, fit function will be plotted against sat data.
- plottitle: (str) Title to add to plot
- plottype: (str) 'discontinuous' (nans will be masked) or 'continuous'.
- returnfig: (bool) Return figure object if True
- savedpi: (int) DPI of image if plotting to outfile.
- specialdict: (dict) Contains limits for plot axes in list form. NOTE this is not the
same as other specialdicts. Dict keys should be "sat" and "mag":
specialdict = {'mag':[40,100],'sat':[300,450]}
RETURNS:
- plot: (matplotlib plot) A plot of the spectrogram.
EXAMPLE:
>>> plotSatMag(LEMI_data, ACE_data, ['x','y'])
APPLICATION:
>>>
"""
loggerplot.info("plotSatMag - Starting plotting of satellite and magnetic data...")
key_mag, key_sat = keys[0], keys[1]
ind_mag, ind_sat, ind_t = KEYLIST.index(key_mag), KEYLIST.index(key_sat), KEYLIST.index('time')
if len(mag_stream.ndarray) > 0.:
t_mag = mag_stream.ndarray[ind_t]
t_sat = sat_stream.ndarray[ind_t]
else:
t_mag = np.asarray([row[0] for row in mag_stream])
t_sat = np.asarray([row[0] for row in sat_stream])
if key_mag not in KEYLIST:
raise Exception("Column key (%s) not valid!" % key_mag)
if key_sat not in KEYLIST:
raise Exception("Column key (%s) not valid!" % key_sat)
if len(mag_stream.ndarray) > 0.:
y_mag = mag_stream.ndarray[ind_mag]
y_sat = sat_stream.ndarray[ind_sat]
else:
y_mag = np.asarray([row[ind_mag] for row in mag_stream])
y_sat = np.asarray([row[ind_sat] for row in sat_stream])
# Fix if NaNs are present:
if plottype == 'discontinuous':
y_mag = maskNAN(y_mag)
y_sat = maskNAN(y_sat)
else:
nans, test = nan_helper(y_mag)
newt_mag = [t_mag[idx] for idx, el in enumerate(y_mag) if not nans[idx]]
t_mag = newt_mag
y_mag = [el for idx, el in enumerate(y_mag) if not nans[idx]]
nans, test = nan_helper(y_sat)
newt_sat = [t_sat[idx] for idx, el in enumerate(y_sat) if not nans[idx]]
t_sat = newt_sat
y_sat = [el for idx, el in enumerate(y_sat) if not nans[idx]]
if len(y_sat) == 0 or len(y_mag) == 0:
loggerplot.error("plotSatMag - Can't plot empty column! Full of nans?")
raise Exception("plotSatMag - Empty column!")
# Define y-labels:
try:
ylabel_mag = mag_stream.header['col-'+key_mag].upper()
except:
ylabel_mag = ''
pass
try:
ylabel_sat = sat_stream.header['col-'+key_sat].upper()
except:
ylabel_sat = ''
pass
try:
yunit_mag = mag_stream.header['unit-col-'+key_mag]
except:
yunit_mag = ''
pass
if not yunit_mag == '':
yunit_mag = re.sub('[#$%&~_^\{}]', '', yunit_mag)
label_mag = ylabel_mag+' $['+yunit_mag+']$'
else:
label_mag = ylabel_mag
try:
yunit_sat = sat_stream.header['unit-col-'+key_sat]
except:
yunit_sat = ''
pass
if not yunit_sat == '':
yunit_sat = re.sub('[#$%&~_^\{}]', '', yunit_sat)
label_sat = ylabel_sat+' $['+yunit_sat+']$'
else:
label_sat = ylabel_sat
# PLOT FIGURE
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_ylabel(label_sat,color=labelcolor)
axis1 = ax1.plot_date(t_sat, y_sat, fmt='-', color=plotcolors[1],label=legendlabels[1])
timeunit = ''
if confinex:
tmin = np.min(t_mag)
tmax = np.max(t_mag)
# --> If dates to be confined, set value types:
_confinex(ax1, tmax, tmin, timeunit)
ax1.set_xlabel("Time (UTC) %s" % timeunit, color=labelcolor)
if plottitle:
ax1.set_title(plottitle)
# NOTE: For mag data to be above sat data in zorder, KEEP THIS AXIS ORDER
# (twinx() does not play nicely with zorder settings)
ax2 = ax1.twinx()
axis2 = ax2.plot_date(t_mag, y_mag, fmt='-', lw=2, color=plotcolors[0],label=legendlabels[0])
ax2.set_ylabel(label_mag,color=labelcolor)
ax2.yaxis.set_label_position('left')
ax2.yaxis.set_ticks_position('left')
ax1.yaxis.set_label_position('right')
ax1.yaxis.tick_right()
# Define y limits:
if 'mag' in specialdict:
ax2.set_ylim(specialdict['mag'][0],specialdict['mag'][1])
else:
ax2.set_ylim(np.min(y_mag)-padding,np.max(y_mag)+padding)
if 'sat' in specialdict:
ax1.set_ylim(specialdict['sat'][0],specialdict['sat'][1])
else:
ax1.set_ylim(np.min(y_sat)-padding,np.max(y_sat)+padding)
# Add a grid:
# Difficult with a legend and twinx()...
#if grid:
# ax1.grid(zorder=2)
# ax2.grid(zorder=1)
# ax1.yaxis.grid(False)
# Add a legend:
if legend == True:
axes = axis2 + axis1
labels = [l.get_label() for l in axes]
legend = ax1.legend(axes, labels, loc='upper left', shadow=True)
for label in legend.get_texts():
label.set_fontsize('small')
if annotate == True:
flags = mag_stream.flags
emptycomment = "-"
poslst = [ix for ix,el in enumerate(FLAGKEYLIST) if el == key_mag]
indexflag = int(poslst[0])
for idx, elem in enumerate(flags[1]):
if not elem == emptycomment and flags[0][idx][indexflag] in ['0','3']:
ax2.annotate(r'%s' % (elem),
xy=(t_mag[idx], y_mag[idx]),
xycoords='data', xytext=(20, 20),
textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=1,
connectionstyle="angle,angleA=0,angleB=90,rad=10"))
# Plot a function to the satellite data:
if plotfunc:
sat_stream._drop_nans('y')
func = sat_stream.fit(['y'],knotstep=0.02)
fkey = 'f'+key_sat
if fkey in func[0]:
ttmp = arange(0,1,0.0001)
ax1.plot_date(denormalize(ttmp,func[1],func[2]),func[0][fkey](ttmp),
'-',color='gray')
if returnfig == True:
fig = plt.gcf()
return fig
if outfile:
plt.savefig(outfile,dpi=savedpi)
loggerplot.info("plotSatMag - Plot saved to %s." % outfile)
else:
plt.show()
loggerplot.info("plotSatMag - Plot completed.")
def plotSpectrogram(stream, keys, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
samp_rate_multiplicator=None,mult=8.0, cmap=None,zorder=None,
plottitle=None, show=True, sphinx=False, clip=[0.0, 1.0], **kwargs):
"""
DEFINITION:
Creates a spectrogram plot of selected keys.
Parameter description at function obspyspectrogram
PARAMETERS:
Variables:
- stream: (DataStream object) Stream to analyse
- keys: (list) Keys to analyse
Kwargs:
- per_lap: (?) ?
- wlen: (?) ?
- log: (bool) ?
- outfile: (str) Filename to save plot to
- fmt: (str) Format of outfile, e.g. 'png'
- axes: (?) ?
- dbscale: (?) ?
- mult: (?) ?
- cmap: (?) ?
- zorder: (?) ?
- plottitle: (?) ?
- samp_rate_multiplicator:
(float=24*3600) Factor applied to the sampling period, which is
given in days, to convert it to seconds so that frequencies are
expressed in Hz. Default is 24*3600.
- show: (?) ?
- sphinx: (?) ?
RETURNS:
- plot: (matplotlib plot) A plot of the spectrogram.
EXAMPLE:
>>> plotSpectrogram(stream, ['x','y'])
APPLICATION:
>>>
"""
if not samp_rate_multiplicator:
samp_rate_multiplicator = 24*3600
t = stream._get_column('time')
if not len(t) > 0:
loggerplot.error('plotSpectrogram: stream of zero length -- aborting')
return
for key in keys:
val = stream._get_column(key)
val = maskNAN(val)
dt = stream.get_sampling_period()*(samp_rate_multiplicator)
Fs = float(1.0/dt)
obspySpectrogram(val,Fs, per_lap=per_lap, wlen=wlen, log=log,
outfile=outfile, fmt=fmt, axes=axes, dbscale=dbscale,
mult=mult, cmap=cmap, zorder=zorder, title=plottitle, show=show,
sphinx=sphinx, clip=clip)
def obspySpectrogram(data, samp_rate, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
mult=8.0, cmap=None, zorder=None, title=None, show=True,
sphinx=False, clip=[0.0, 1.0]):
"""
Function taken from ObsPy
Computes and plots spectrogram of the input data.
:param data: Input data
:type samp_rate: float
:param samp_rate: Samplerate in Hz
:type per_lap: float
:param per_lap: Percentage of overlap of sliding window, ranging from 0
to 1. High overlaps take a long time to compute.
:type wlen: int or float
:param wlen: Window length for fft in seconds. If this parameter is too
small, the calculation will take forever.
:type log: bool
:param log: Logarithmic frequency axis if True, linear frequency axis
otherwise.
:type outfile: String
:param outfile: String for the filename of output file, if None
interactive plotting is activated.
:type fmt: String
:param fmt: Format of image to save
:type axes: :class:`matplotlib.axes.Axes`
:param axes: Plot into given axes, this deactivates the fmt and
outfile option.
:type dbscale: bool
:param dbscale: If True 10 * log10 of color values is taken, if False the
sqrt is taken.
:type mult: float
:param mult: Pad zeros to length mult * wlen. This will make the spectrogram
smoother. Available for matplotlib > 0.99.0.
:type cmap: :class:`matplotlib.colors.Colormap`
:param cmap: Specify a custom colormap instance
:type zorder: float
:param zorder: Specify the zorder of the plot. Only of importance if other
plots in the same axes are executed.
:type title: String
:param title: Set the plot title
:type show: bool
:param show: If True (default), call `plt.show()` at the end of the routine
(unless `outfile` is given). Set to False to allow further
modifications to the figure before showing it.
:type sphinx: bool
:param sphinx: Internal flag used for API doc generation, default False
:type clip: [float, float]
:param clip: adjust colormap to clip at lower and/or upper end. The given
percentages of the amplitude range (linear or logarithmic depending
on option `dbscale`) are clipped.
"""
# enforce float for samp_rate
samp_rate = float(samp_rate)
# set wlen from samp_rate if not specified otherwise
if not wlen:
wlen = samp_rate / 100.
npts = len(data)
# nfft needs to be an integer, otherwise a deprecation will be raised
#XXX add condition for too many windows => calculation takes for ever
nfft = int(nearestPow2(wlen * samp_rate))
if nfft > npts:
# window length exceeds the trace: fall back to one eighth of the trace length
nfft = int(nearestPow2(npts / 8.0))
if mult is not None:
mult = int(nearestPow2(mult))
mult = mult * nfft
nlap = int(nfft * float(per_lap))
data = data - data.mean()
end = npts / samp_rate
# Here we call not plt.specgram as this already produces a plot
# matplotlib.mlab.specgram should be faster as it computes only the
# arrays
# XXX mlab.specgram uses fft, would be better and faster use rfft
# TODO: mlab.specgram has been reported to fail here for some data sets;
# fixed default values for NFFT/noverlap (e.g. 256/128) do not help.
# The call is identical for all supported matplotlib versions.
specgram, freq, time = mlab.specgram(data, Fs=samp_rate,
NFFT=nfft, noverlap=nlap)
# db scale and remove zero/offset for amplitude
if dbscale:
specgram = 10 * np.log10(specgram[1:, :])
else:
specgram = np.sqrt(specgram[1:, :])
freq = freq[1:]
vmin, vmax = clip
if vmin < 0 or vmax > 1 or vmin >= vmax:
msg = "Invalid parameters for clip option."
raise ValueError(msg)
_range = float(specgram.max() - specgram.min())
vmin = specgram.min() + vmin * _range
vmax = specgram.min() + vmax * _range
norm = Normalize(vmin, vmax, clip=True)
if not axes:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes
# calculate half bin width
halfbin_time = (time[1] - time[0]) / 2.0
halfbin_freq = (freq[1] - freq[0]) / 2.0
if log:
# pcolor expects one bin more at the right end
freq = np.concatenate((freq, [freq[-1] + 2 * halfbin_freq]))
time = np.concatenate((time, [time[-1] + 2 * halfbin_time]))
# center bin
time -= halfbin_time
freq -= halfbin_freq
# pcolormesh issue was fixed in matplotlib r5716 (2008-07-07)
# inbetween tags 0.98.2 and 0.98.3
# see:
# - http://matplotlib.svn.sourceforge.net/viewvc/...
# matplotlib?revision=5716&view=revision
# - http://matplotlib.sourceforge.net/_static/CHANGELOG
if MATPLOTLIB_VERSION >= [0, 98, 3]:
# Log scaling for frequency values (y-axis)
ax.set_yscale('log')
# Plot times
ax.pcolormesh(time, freq, specgram, cmap=cmap, zorder=zorder,
norm=norm)
else:
X, Y = np.meshgrid(time, freq)
ax.pcolor(X, Y, specgram, cmap=cmap, zorder=zorder, norm=norm)
ax.semilogy()
else:
# this method is much much faster!
specgram = np.flipud(specgram)
# center bin
extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
freq[0] - halfbin_freq, freq[-1] + halfbin_freq)
ax.imshow(specgram, interpolation="nearest", extent=extent,
cmap=cmap, zorder=zorder)
# set correct way of axis, whitespace before and after with window
# length
ax.axis('tight')
ax.set_xlim(0, end)
ax.grid(False)
if axes:
return ax
ax.set_xlabel('Time [s]')
ax.set_ylabel('Frequency [Hz]')
if title:
ax.set_title(title)
if not sphinx:
# ignoring all NumPy warnings during plot
temp = np.geterr()
np.seterr(all='ignore')
plt.draw()
np.seterr(**temp)
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif show:
plt.show()
else:
return fig
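# Hedged sketch (illustration only): reproduces the window-selection logic of
# obspySpectrogram above. The local power-of-two helper is an assumption that
# stands in for the module's nearestPow2 implementation.
def _example_spectrogram_windows(npts, samp_rate, wlen=None, per_lap=0.9):
    """Return (nfft, nlap) roughly as chosen by obspySpectrogram."""
    import numpy as np

    def _nearest_pow2(x):
        return int(2 ** np.round(np.log2(x)))

    if not wlen:
        wlen = samp_rate / 100.0
    nfft = _nearest_pow2(wlen * samp_rate)
    if nfft > npts:
        nfft = _nearest_pow2(npts / 8.0)
    nlap = int(nfft * float(per_lap))
    return nfft, nlap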
def plotStereoplot(stream,focus='all',colorlist = ['b','r','g','c','m','y','k'],
bgcolor='#d5de9c',griddeccolor='#316931',gridinccolor='#316931',
savedpi=80,legend=True,labellimit=11,legendposition="lower left",
figure=False,noshow=False,plottitle=None,groups=None,outfile=None,**kwargs):
"""
DEFINITION:
Plots declination and inclination values in stereographic projection.
Will abort if the stream is not of type 'idff'.
Full circles denote positive inclinations, open negative
PARAMETERS:
Variables:
- stream (DataStream) a magpy datastream object
Kwargs:
- bgcolor: (colour='#d5de9c') Background colour
- figure: (matplotlib figure object) If given, plot into this figure and return the axis
- focus: (str) defines the plot area. Options:
all (default) - -90 to 90 deg inc, 360 deg dec
q1 - first quadrant
q2 - second quadrant
q3 - third quadrant
q4 - fourth quadrant
data - focus on data (if angular spread is less than 10 deg)
- gridcolor: (str) Define grid color e.g. '0.5' greyscale, 'r' red, etc
- griddeccolor: (colour='#316931') Grid colour for declination
- gridinccolor: (colour='#316931') Grid colour for inclination
- groups: (str) key of the keylist which defines the colour of points
(e.g. 'str2' in absolutes to select
different colours for different instruments)
- legend: (bool) - draws legend only if groups is given - default True
- legendposition:
(str) - draws the legend at chosen position,
(e.g. "upper right", "lower center") - default is "lower left"
- labellimit: (int)- maximum length of label in legend
- noshow: (bool) If True, will not call show at the end,
- outfile: (str) to save the figure, if path is not existing it will be created
- savedpi: (int) resolution
- plottitle: (str) Title at top of plot
REQUIRES:
- package operator for color selection
RETURNS:
- plot: (matplotlib plot) The stereoplot.
ToDo:
- add alpha 95 calc
EXAMPLE:
>>> stream.stereoplot(focus='data',groups='str2')
APPLICATION:
>>>
"""
loggerplot.info('plotStereoplot: Starting plot of stereoplot.')
if not stream[0].typ == 'idff':
loggerplot.error('plotStereoplot: idf data required for stereoplot.')
raise Exception("Idf data required for plotting a stereoplot!")
inc = stream._get_column('x')
dec = stream._get_column('y')
col = ['']
if groups:
sel = stream._get_column(groups)
col = list(set(list(sel)))
if len(col) > 7:
col = col[:7]
if not len(dec) == len(inc):
loggerplot.error('plotStereoplot: Check input file - unequal inc and dec data?')
return
if not figure:
fig = plt.figure()
else:
fig = figure
ax = plt.gca()
ax.cla() # clear things for fresh plot
ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
# Define coordinates:
basic1=plt.Circle((0,0),90,color=bgcolor,fill=True)
basic1a=plt.Circle((0,0),90,color=gridinccolor,fill=False)
basic2=plt.Circle((0,0),30,color=gridinccolor,fill=False,linestyle='dotted')
basic3=plt.Circle((0,0),60,color=gridinccolor,fill=False,linestyle='dotted')
basic4=plt.Line2D([0,0],[-90,90],color=griddeccolor,linestyle='dashed')
basic5=plt.Line2D([-90,90],[0,0],color=griddeccolor,linestyle='dashed')
fig.gca().add_artist(basic1)
fig.gca().add_artist(basic1a)
fig.gca().add_artist(basic2)
fig.gca().add_artist(basic3)
fig.gca().add_artist(basic4)
fig.gca().add_artist(basic5)
for j in range(len(col)):
color = colorlist[j]
xpos,ypos,xneg,yneg,xabs,y = [],[],[],[],[],[]
for i,el in enumerate(inc):
if groups:
if sel[i] == col[j]:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
else:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
xmax = np.ceil(max(xabs))
xmin = np.floor(min(xabs))
xdif = xmax-xmin
ymax = np.ceil(max(y))
ymin = np.floor(min(y))
ydif = ymax-ymin
maxdif = max([xdif,ydif])
mindec = np.floor(min(dec))
maxdec = np.ceil(max(dec))
mininc = np.floor(min(np.abs(inc)))
maxinc = np.ceil(max(np.abs(inc)))
if focus == 'data' and maxdif <= 10:
# decs
startdec = mindec
decline,inclst = [],[]
startinc = mininc
incline = []
while startdec <= maxdec:
xl = 90*np.sin(np.pi/180*startdec)
yl = 90*np.cos(np.pi/180*startdec)
decline.append([xl,yl,startdec])
startdec = startdec+1
while startinc <= maxinc:
inclst.append(90-np.abs(startinc))
startinc = startinc+1
if focus == 'all':
ax.set_xlim((-90,90))
ax.set_ylim((-90,90))
if focus == 'q1':
ax.set_xlim((0,90))
ax.set_ylim((0,90))
if focus == 'q2':
ax.set_xlim((-90,0))
ax.set_ylim((0,90))
if focus == 'q3':
ax.set_xlim((-90,0))
ax.set_ylim((-90,0))
if focus == 'q4':
ax.set_xlim((0,90))
ax.set_ylim((-90,0))
if focus == 'data':
ax.set_xlim((xmin,xmax))
ax.set_ylim((ymin,ymax))
#ax.annotate('Test', xy=(1.2, 25.2))
ax.plot(xpos,ypos,'o',color=color, label=col[j][:labellimit])
ax.plot(xneg,yneg,'o',color='white')
ax.annotate('60', xy=(0, 30))
ax.annotate('30', xy=(0, 60))
ax.annotate('0', xy=(0, 90))
ax.annotate('90', xy=(90, 0))
ax.annotate('180', xy=(0, -90))
ax.annotate('270', xy=(-90, 0))
if focus == 'data' and maxdif <= 10:
for elem in decline:
pline = plt.Line2D([0,elem[0]],[0,elem[1]],color=griddeccolor,linestyle='dotted')
xa = elem[0]/elem[1]*((ymax - ymin)/2+ymin)
ya = (ymax - ymin)/2 + ymin
annotext = "D:%i" % int(elem[2])
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pline)
for elem in inclst:
pcirc = plt.Circle((0,0),elem,color=gridinccolor,fill=False,linestyle='dotted')
xa = (xmax-xmin)/2 + xmin
ya = sqrt((elem*elem)-(xa*xa))
annotext = "I:%i" % int(90-elem)
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pcirc)
if groups and legend:
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels),key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=legendposition)
if plottitle:
ax.set_title(plottitle)
# SAVE TO FILE (or show)
if figure:
return ax
if outfile:
path = os.path.split(outfile)[0]
if not path == '':
if not os.path.exists(path):
os.makedirs(path)
fmt = kwargs.get('fmt', None)
if fmt:
fig.savefig(outfile, format=fmt, dpi=savedpi)
else:
fig.savefig(outfile, dpi=savedpi)
elif noshow:
return fig
else:
plt.show()
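# Hedged sketch (added for illustration): the declination/inclination to (x, y)
# mapping used by plotStereoplot above; the co-inclination 90 - |inc| is the
# radial distance from the centre of the stereoplot.
def _example_stereo_xy(dec_deg, inc_deg):
    """Return the (x, y) stereoplot coordinates for one dec/inc pair in degrees."""
    import numpy as np
    coinc = 90 - np.abs(inc_deg)
    x = coinc * np.sin(np.pi / 180. * dec_deg)
    y = coinc * np.cos(np.pi / 180. * dec_deg)
    return x, y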
#####################################################################
# #
# INTERNAL/HELPER FUNCTIONS #
# (Best not play with these.) #
# #
#####################################################################
def _plot(data,savedpi=80,grid=True,gridcolor=gridcolor,noshow=False,
bgcolor='white',plottitle=None,fullday=False,bartrange=0.06,
labelcolor=labelcolor,confinex=False,outfile=None,stormanno_s=True,
stormanno_m=True,stormanno_r=True,fmt=None,figure=False,fill=[],
legendposition='upper left',singlesubplot=False,opacity=1.0):
'''
For internal use only. Feed a list of dictionaries in here to plot.
Every dictionary should contain all data needed for one single subplot.
DICTIONARY STRUCTURE FOR EVERY SUBPLOT:
[ { ***REQUIRED***
'key' : 'x' (str) MagPy key
'tdata' : t (np.ndarray) Time
'ydata' : y (np.ndarray) Data y(t)
'ymin' : ymin (float) Minimum of y-axis
'ymax' : ymax (float) Maximum of y-axis
'symbol': '-' (str) Symbol for plotting, '-' = line
'color' : 'b' (str) Colour of plotted line
'ylabel': 'F [nt]' (str) Label on y-axis
'datalabel': '' (str) Label for the data series (used in the legend)
'annotate': False (bool) If this is True, must have 'flags' key
'sensorid': 'LEMI025' (str) String pulled from header data. If available,
will be plotted alongside data for clarity.
OPTIONAL:
'errorbars': eb (np.ndarray) Errorbars to plot in subplot
'flags' : flags (np.ndarray) Flags to add into subplot.
Note: must be 2-dimensional, flags & comments.
'function': fn (function object) Plot a function within the subplot.
} ,
{ 'key' : ... } ... ]
GENERAL VARIABLES:
plottitle = "Data from 2014-05-02"
confinex = False
bgcolor = 'blue'
etc. ... (all are listed in plot() and plotStreams() functions)
figure -- for GUI
fill = ['x']
'''
if not figure:
fig = plt.figure()
else:
fig = figure
# CREATE MATPLOTLIB FIGURE OBJECT:
#fig = plt.figure()
plt_fmt = ScalarFormatter(useOffset=False)
n_subplots = len(data)
for i in range(n_subplots):
subplt = "%d%d%d" %(n_subplots,1,i+1)
if singlesubplot:
subplt = "111"
#------------------------------------------------------------
# PART 1: Dealing with data
#------------------------------------------------------------
# DEFINE DATA:
key = data[i]['key']
t = np.asarray(data[i]['tdata']).astype(float)
y = np.asarray(data[i]['ydata']).astype(float)
if not len(t) == len(y):
y = [99999]*len(t)
# Sort data before plotting - really necessary ? costs 0.1 seconds for 1 day second data
#datar = sorted([[t[j],y[j]] for j, el in enumerate(t)])
#t = [datar[j][0] for j, el in enumerate(datar)]
#y = [datar[j][1] for j, el in enumerate(datar)]
color = data[i]['color']
symbol = data[i]['symbol']
datalabel = data[i]['datalabel']
# CREATE SUBPLOT OBJECT & ADD TITLE:
loggerplot.info("_plot: Adding subplot for key %s..." % data[i]['ylabel'])
if i == 0:
ax = fig.add_subplot(subplt, axisbg=bgcolor)
if plottitle:
ax.set_title(plottitle)
a = ax
else:
ax = fig.add_subplot(subplt, sharex=a, axisbg=bgcolor)
# PLOT DATA:
# --> If bars are in the data (for e.g. k-index):
if symbol == 'z':
xy = range(9)
for num in range(len(t)):
if bartrange < t[num] < np.max(t)-bartrange:
ax.fill([t[num]-bartrange,t[num]+bartrange,t[num]+bartrange,t[num]-
bartrange],[0,0,y[num]+0.1,y[num]+0.1],
facecolor=cm.RdYlGn((9-y[num])/9.,1),alpha=opacity,edgecolor='k')
if datalabel != '':
ax.plot_date(t,y,color+'|',label=datalabel,alpha=opacity)
else:
ax.plot_date(t,y,color+'|',alpha=opacity)
# --> Otherwise plot as normal:
else:
if datalabel != '':
ax.plot_date(t,y,color+symbol,label=datalabel)
else:
ax.plot_date(t,y,color+symbol)
if key in fill:
ax.fill_between(t,0,y,color=color,alpha=opacity)
# PLOT A LEGEND
if datalabel != '':
legend = ax.legend(loc=legendposition, shadow=True)
for label in legend.get_texts():
label.set_fontsize('small')
# DEFINE MIN AND MAX ON Y-AXIS:
ymin = data[i]['ymin']
ymax = data[i]['ymax']
# PLOT ERROR BARS (if available):
if 'errors' in data[i]:
errorbars = data[i]['errors']
ax.errorbar(t,y,yerr=errorbars,fmt=color+'o')
# ANNOTATE:
if data[i]['annotate'] == True:
flags = data[i]['flags']
emptycomment = "-"
indexflag = KEYLIST.index(key)
# identify subsequent index numbers in flags[1]
a_t,a_y,b_t,b_y,c_t,c_y,d_t,d_y = [],[],[],[],[],[],[],[]
if len(flags[1]) > 0:
# 1. get different comments
tmp = DataStream()
uniqueflags = tmp.union(flags[1])
#print "Flags", flags,uniqueflags, key
for fl in uniqueflags:
#print "Flag", fl
#if fl in ['-','']:
# break
#print fl
# 1. get all indicies of this comment
flagindicies = []
for idx, elem in enumerate(flags[1]):
if not elem == '' and elem == fl:
flagindicies.append(idx)
#print "IDX", np.asarray(flagindicies)
# 2. get consecutive groups
for k, g in groupby(enumerate(flagindicies), lambda ix: ix[0] - ix[1]):
consecutives = list(map(itemgetter(1), g))
#print "Cons", np.asarray(consecutives)
# 3. add annotation arrow for all but 1
cnt0 = consecutives[0]
#print(consecutives)
#print cnt0, indexflag, flags[0], flags[0][cnt0], flags[1][cnt0], flags[0][cnt0][indexflag]
if len(flags[0][cnt0]) >= indexflag:
if not flags[0][cnt0][indexflag] in ['1','-'] and not flags[1][cnt0] == '-':
ax.annotate(r'%s' % (flags[1][cnt0]),
xy=(t[cnt0], y[cnt0]),
xycoords='data', xytext=(20, 20),
textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.9"),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=1, connectionstyle="angle,angleA=0,angleB=90,rad=10"))
for idx in consecutives:
#if not flags[0][idx][indexflag] == '0':
# print "Got", flags[0][idx][indexflag], idx
if flags[0][idx][indexflag] in ['3']:
a_t.append(float(t[idx]))
a_y.append(y[idx])
elif flags[0][idx][indexflag] in ['1']:
b_t.append(float(t[idx]))
b_y.append(y[idx])
elif flags[0][idx][indexflag] in ['2']:
c_t.append(float(t[idx]))
c_y.append(y[idx])
elif flags[0][idx][indexflag] in ['4']:
d_t.append(float(t[idx]))
d_y.append(y[idx])
else:
print("Found problem in flagging information - still to be solved")
print("Flag at count and its index position", cnt0, indexflag)
print("Flag and Comment", flags[0][cnt0], flags[1][cnt0])
if len(a_t) > 0:
if len(a_t) > 10000:
ax.plot(a_t,a_y,'-',c='r') ## Use lines if a lot of data is marked
else:
ax.scatter(a_t,a_y,c='r')
if len(b_t) > 0:
if len(b_t) > 10000:
ax.plot(b_t,b_y,'-',c='orange')
else:
ax.scatter(b_t,b_y,c='orange')
if len(c_t) > 0:
# TODO Here we have a masked nan warning - to be solved
#print np.asarray(c_t)
#print np.asarray(c_y)
if len(c_t) > 10000:
ax.plot(c_t,c_y,'-',c='g')
else:
ax.scatter(c_t,c_y,c='g')
if len(d_t) > 0:
if len(d_t) > 10000:
ax.plot(d_t,d_y,'-',c='b')
else:
ax.scatter(d_t,d_y,c='b')
# PLOT A GIVEN FUNCTION:
if 'function' in data[i]:
fkey = 'f'+key
function = data[i]['function']
if fkey in function[0]:
# --> Get the minimum and maximum relative times
ttmp = arange(0,1,0.0001)
ax.plot_date(denormalize(ttmp,function[1],function[2]),function[0][fkey](ttmp),'r-')
# PLOT SHADED AND ANNOTATED STORM PHASES:
if 'stormphases' in data[i]:
timespan = num2date(t[-1]) - num2date(t[0])
y_pos = 0.9 # have at height 90% of total plot, x_pos(n)=1-(1-y_pos)*n
y_anno = ymin + (1-(1-y_pos)*n_subplots)*(ymax-ymin)
t_phases = data[i]['stormphases']
if 'ssc' in t_phases and 'mphase' in t_phases:
t_ssc = t_phases['ssc']
t_mphase = t_phases['mphase']
ax.axvspan(t_ssc, t_mphase, facecolor='red', alpha=0.3, linewidth=0)
if stormanno_s: # requirement so that only one plot is annotated
x_anno = t_ssc-timedelta(seconds=(timespan.seconds*0.1))
t_ssc_stream, idx_ssc = find_nearest(t, date2num(t_ssc))
y_ssc = y[idx_ssc]
ax.annotate('SSC', xy=(t_ssc,y_ssc),
xytext=(x_anno,y_anno),
bbox=dict(boxstyle="round", fc="0.95", alpha=0.6),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=1,
connectionstyle="angle,angleA=0,angleB=90,rad=10"))
stormanno_s = False
if 'mphase' in t_phases and 'rphase' in t_phases:
t_mphase = t_phases['mphase']
t_rphase = t_phases['rphase']
ax.axvspan(t_mphase, t_rphase, facecolor='yellow', alpha=0.3, linewidth=0)
if stormanno_m:
x_anno = t_mphase+timedelta(seconds=(timespan.seconds*0.03))
t_mphase_stream, idx_mphase = find_nearest(t, date2num(t_mphase))
y_mphase = y[idx_mphase]
ax.annotate('Main\nPhase', xy=(t_mphase,y_mphase),
xytext=(x_anno,y_anno),
bbox=dict(boxstyle="round", fc="0.95", alpha=0.6))
stormanno_m = False
if 'rphase' in t_phases and 'stormend' in t_phases:
t_rphase = t_phases['rphase']
t_stormend = t_phases['stormend']
ax.axvspan(t_rphase, t_stormend, facecolor='green', alpha=0.3, linewidth=0)
if stormanno_r:
x_anno = t_rphase+timedelta(seconds=(timespan.seconds*0.03))
t_rphase_stream, idx_rphase = find_nearest(t, date2num(t_rphase))
y_rphase = y[idx_rphase]
ax.annotate('Recovery\nPhase', xy=(t_rphase,y_rphase),
xytext=(x_anno,y_anno),
bbox=dict(boxstyle="round", fc="0.95", alpha=0.6))
stormanno_r = False
#------------------------------------------------------------
# PART 2: Formatting the plot
#------------------------------------------------------------
# ADD SENSOR IDS TO DATA PLOTS:
if 'sensorid' in data[i]:
sensorid = data[i]['sensorid']
ydistance = [13,13,15,15,15,15,15,15]
ax.annotate(sensorid, xy=(10, ydistance[n_subplots-1]),
xycoords='axes points',
horizontalalignment='left', verticalalignment='top')
# ADD GRID:
if grid:
ax.grid(True,color=gridcolor,linewidth=0.5)
# SET X-LABELS:
timeunit = ''
if confinex:
tmin = np.min(t)
tmax = np.max(t)
# --> If dates to be confined, set value types:
timeunit = _confinex(ax, tmax, tmin, timeunit)
if i < n_subplots-1:
setp(ax.get_xticklabels(), visible=False)
else:
ax.set_xlabel("Time (UTC) %s" % timeunit, color=labelcolor)
# SET TICK TO ALTERNATING SIDES:
if bool(i & 1):
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
# APPLY FORMATTERS:
label = data[i]['ylabel']
ax.set_ylim(ymin,ymax)
ax.set_ylabel(label, color=labelcolor)
ax.get_yaxis().set_major_formatter(plt_fmt)
#----------------------------------------------------------------
# PART 3: Finalising and saving plot
#----------------------------------------------------------------
# BUNDLE UP ALL SUBPLOTS:
fig.subplots_adjust(hspace=0)
# ADJUST X-AXIS FOR FULLDAY PLOTTING:
if fullday:
ax.set_xlim(np.floor(np.round(np.min(t)*100)/100),np.floor(np.max(t)+1))
# SAVE OR SHOW:
# TODO the next two line are used for gui
if figure:
return ax
if outfile:
path = os.path.split(outfile)[0]
if not path == '':
if not os.path.exists(path):
os.makedirs(path)
if fmt:
fig.savefig(outfile, format=fmt, dpi=savedpi)
else:
fig.savefig(outfile, dpi=savedpi)
elif noshow:
return fig
else:
plt.show()
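# Hedged illustration (added; not part of the original module): a minimal list
# of subplot dictionaries in the structure _plot expects, built from synthetic
# data. The key names follow the docstring and the code of _plot above.
def _example_plot_input():
    import numpy as np
    from datetime import datetime, timedelta
    from matplotlib.dates import date2num
    t0 = datetime(2014, 5, 2)
    t = np.array([date2num(t0 + timedelta(minutes=m)) for m in range(60)])
    y = np.sin(np.linspace(0, 2 * np.pi, 60))
    return [{'key': 'x', 'tdata': t, 'ydata': y,
             'ymin': -1.1, 'ymax': 1.1,
             'symbol': '-', 'color': 'b',
             'ylabel': 'X [nT]', 'datalabel': '',
             'annotate': False, 'sensorid': 'EXAMPLE'}]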
def _confinex(ax, tmax, tmin, timeunit):
"""
Automatically determines t-range so that the x-axis is easier
on the eye.
"""
trange = tmax - tmin
loggerplot.debug('plot: x range = %s' % str(trange))
if trange < 0.0001: # 8 sec level --> set 0.5 second
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%S'))
timeunit = '[Sec]'
elif trange < 0.01: # 13 minute level
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%M:%S'))
timeunit = '[M:S]'
elif trange <= 1: # day level --> set 1 hour
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
timeunit = '[H:M]'
elif trange < 7: # 3 day level
if trange < 2:
ax.get_xaxis().set_major_locator(matplotlib.dates.HourLocator(interval=6))
elif trange < 5:
ax.get_xaxis().set_major_locator(matplotlib.dates.HourLocator(interval=12))
else:
ax.get_xaxis().set_major_locator(matplotlib.dates.WeekdayLocator(byweekday=matplotlib.dates.MO))
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%d.%b\n%H:%M'))
setp(ax.get_xticklabels(),rotation='0')
timeunit = '[Day-H:M]'
elif trange < 60: # month level
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%d.%b'))
setp(ax.get_xticklabels(),rotation='70')
timeunit = '[Day]'
elif trange < 150: # several-month level
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%d.%b\n%Y'))
setp(ax.get_xticklabels(),rotation='0')
timeunit = '[Day]'
elif trange < 600: # multi-month level
if trange < 300:
ax.get_xaxis().set_major_locator(matplotlib.dates.MonthLocator(interval=1))
elif trange < 420:
ax.get_xaxis().set_major_locator(matplotlib.dates.MonthLocator(interval=2))
else:
ax.get_xaxis().set_major_locator(matplotlib.dates.MonthLocator(interval=4))
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%b %Y'))
setp(ax.get_xticklabels(),rotation='0')
timeunit = '[Month]'
else:
ax.get_xaxis().set_major_formatter(matplotlib.dates.DateFormatter('%Y'))
timeunit = '[Year]'
return timeunit
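# Hedged note (added for illustration): the branch _confinex takes depends on
# the plotted time span in days, e.g. a 3 hour window (trange = 0.125) selects
# '%H:%M' tick labels while a two year window (trange ~ 730) selects '%Y'.
def _example_confinex(ax, t):
    """Apply _confinex to an axis for a time array given as matplotlib datenums."""
    import numpy as np
    return _confinex(ax, np.max(t), np.min(t), '')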
#####################################################################
# #
# TESTING #
# Run this after making changes: #
# $ python mpplot.py #
# #
#####################################################################
if __name__ == '__main__':
print()
print("----------------------------------------------------------")
print("TESTING: PLOTTING PACKAGE")
print("All plotting methods will be tested. This may take a while.")
print("A summary will be presented at the end. Any protocols")
print("or functions with errors will be listed.")
print()
print("NOTE: This test requires graphical user interface")
print("confirmation of package integrity for the majority of")
print("functions. The plot titles specify what should be present")
print("in the plot for it to have plotted successfully.")
print(" So get comfy and have a good look.")
print("----------------------------------------------------------")
print()
print("Please enter path of a variometer data file for testing:")
print("(e.g. /srv/archive/WIC/LEMI025/LEMI025_2014-05-07.bin)")
while True:
filepath = raw_input("> ")
if os.path.exists(filepath):
break
else:
print("Sorry, that file doesn't exist. Try again.")
print()
now = datetime.utcnow()
testrun = 'plottest_'+datetime.strftime(now,'%Y%m%d-%H%M')
t_start_test = time.time()
errors = {}
print(datetime.utcnow(), "- Starting plot package test. This run: %s." % testrun)
while True:
# Step 1 - Read data
try:
teststream = read(filepath,tenHz=True)
print(datetime.utcnow(), "- Stream read in successfully.")
except Exception as excep:
errors['read'] = str(excep)
print(datetime.utcnow(), "--- ERROR reading stream. Aborting test.")
break
# Step 2 - Pick standard key for all other plots
try:
keys = teststream._get_key_headers()
key = [keys[0]]
key2 = [keys[0], keys[1]]
print(datetime.utcnow(), "- Using %s key for all subsequent plots." % key[0])
except Exception as excep:
errors['_get_key_headers'] = str(excep)
print(datetime.utcnow(), "--- ERROR getting default keys. Aborting test.")
break
# Step 3 - Simple single plot with ploteasy
try:
ploteasy(teststream)
print(datetime.utcnow(), "- Plotted using ploteasy function.")
except Exception as excep:
errors['ploteasy'] = str(excep)
print(datetime.utcnow(), "--- ERROR with ploteasy function. Aborting test.")
break
# Step 4 - Standard plot
try:
plot_new(teststream,key,
plottitle = "Simple plot of %s" % key[0])
print(datetime.utcnow(), "- Plotted standard plot.")
except Exception as excep:
errors['plot-vanilla'] = str(excep)
print(datetime.utcnow(), "--- ERROR with standard plot. Aborting test.")
break
# Step 5 - Multiple streams
streamlist = [teststream, teststream ]
variables = [key, key2 ]
try:
plotStreams(streamlist, variables,
plottitle = "Multiple streams: Three bars, top two should match.")
print(datetime.utcnow(), "- Plotted multiple streams.")
except Exception as excep:
errors['plotStreams-vanilla'] = str(excep)
print(datetime.utcnow(), "--- ERROR with plotting multiple streams. Aborting test.")
break
# Step 6 - Normalised stream comparison
try:
plotNormStreams([teststream], key[0],
confinex = True,
plottitle = "Normalized stream: Stream key should be normalized to zero.")
print(datetime.utcnow(), "- Plotted normalized streams.")
except Exception as excep:
errors['plotNormStreams'] = str(excep)
print(datetime.utcnow(), "--- ERROR plotting normalized streams.")
# Step 7 - Flagged plot
# ...
# Step 8a - Plot with phases (single)
t_start, t_end = teststream._find_t_limits()
timespan = t_end - t_start
t_stormphases = {}
t_stormphases['ssc'] = t_start + timedelta(seconds=(timespan.seconds*0.2))
t_stormphases['mphase'] = t_start + timedelta(seconds=(timespan.seconds*0.4))
t_stormphases['rphase'] = t_start + timedelta(seconds=(timespan.seconds*0.6))
t_stormphases['stormend'] = t_start + timedelta(seconds=(timespan.seconds*0.8))
try:
plot_new(teststream,key,
stormphases = True,
t_stormphases = t_stormphases,
plottitle = "Single plot showing all THREE storm phases, annotated")
print(datetime.utcnow(), "- Plotted annotated single plot of storm phases.")
except Exception as excep:
errors['plot-stormphases'] = str(excep)
print(datetime.utcnow(), "--- ERROR with storm phases plot.")
# Step 8b - Plot with phases (multiple)
try:
plotStreams(streamlist,variables,
stormphases = True,
t_stormphases = t_stormphases,
plottitle = "Multiple plot showing all THREE storm phases, annotated")
print(datetime.utcnow(), "- Plotted annotated multiple plot of storm phases.")
except Exception as excep:
errors['plotStreams-stormphases'] = str(excep)
print(datetime.utcnow(), "--- ERROR with storm phases multiple plot.")
# Step 9 - Plot satellite vs. magnetic data
try:
xmin, xmax = np.min(teststream._get_column('x')), np.max(teststream._get_column('x'))
ymin, ymax = np.min(teststream._get_column('y')), np.max(teststream._get_column('y'))
plotSatMag(teststream,teststream,['x','y'],
specialdict={'mag':[xmin-45,xmax+5],'sat':[ymin-5,ymax+45]},
plottitle = "Two variables in same plots with double y axes")
print(datetime.utcnow(), "- Plotted magnetic/satellite data.")
except Exception as excep:
errors['plotSatMag'] = str(excep)
print(datetime.utcnow(), "--- ERROR with plotSatMagplot.")
# Step 10 - Plot power spectrum
try:
freqm, asdm = plotPS(teststream,key[0],
returndata=True,
marks={'Look here!':0.0001, '...and here!':0.01},
plottitle = "Simple power spectrum plot with two marks")
print(datetime.utcnow(), "- Plotted power spectrum. Max frequency is at %s." % max(freqm))
except Exception as excep:
errors['plotPS'] = str(excep)
print(datetime.utcnow(), "--- ERROR plotting power spectrum.")
# Step 11 - Plot normal spectrogram
try:
plotSpectrogram(teststream,key2,
plottitle = "Spectrogram of two keys")
print(datetime.utcnow(), "- Plotted spectrogram.")
except Exception as excep:
errors['plotSpectrogram'] = str(excep)
print(datetime.utcnow(), "--- ERROR plotting spectrogram.")
# Step 12 - Plot function
try:
func = teststream.fit(key,knotstep=0.02)
plot_new(teststream,key,function=func,
plottitle = "Fit function plotted over original data.")
except Exception as excep:
errors['plot(function)'] = str(excep)
print(datetime.utcnow(), "--- ERROR plotting function.")
# Step 13 - Plot normal stereoplot
# (This should stay as last step due to coordinate conversion.)
try:
teststream._convertstream('xyz2idf')
plotStereoplot(teststream,
plottitle="Standard stereoplot")
print(datetime.utcnow(), "- Plotted stereoplot.")
except Exception as excep:
errors['plotStereoplot'] = str(excep)
print(datetime.utcnow(), "--- ERROR plotting stereoplot.")
# If end of routine is reached... break.
break
t_end_test = time.time()
time_taken = t_end_test - t_start_test
print(datetime.utcnow(), "- Stream testing completed in %s s. Results below." % time_taken)
print()
print("----------------------------------------------------------")
if errors == {}:
print("0 errors! Great! :)")
else:
print(len(errors), "errors were found in the following functions:")
print(str(errors.keys()))
print()
print("Would you like to print the exceptions thrown?")
excep_answer = raw_input("(Y/n) > ")
if excep_answer.lower() == 'y':
for item in errors:
print(item + " error string:")
print(" " + errors[item])
print()
print("Good-bye!")
print("----------------------------------------------------------")
|
gpl-3.0
|
pv/scikit-learn
|
examples/mixture/plot_gmm_pdf.py
|
284
|
1528
|
"""
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
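# Hedged follow-up (added for illustration, using the same old-style GMM API as
# above): evaluate the fitted model's per-sample log-likelihood at one point.
def example_point_loglik(model, point=(20.0, 20.0)):
    return model.score_samples(np.array([point]))[0]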
|
bsd-3-clause
|
njwilson23/scipy
|
scipy/signal/filter_design.py
|
14
|
127885
|
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
def findfreqs(num, den, N):
"""
Find an array of frequencies for computing the response of a filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator `b` and denominator `a` of a filter, compute its
frequency response::
H(w) = (b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]) /
(a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1])
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> import numpy as np
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator `b` and denominator `a` of a digital filter,
compute its frequency response::
H(e^jw) = B(e^jw) / A(e^jw)
= (b[0] + b[1]*e^(-jw) + ... + b[m]*e^(-jmw)) /
(a[0] + a[1]*e^(-jw) + ... + a[n]*e^(-jnw))
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which h was computed, in radians/sample.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> import numpy as np
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
D(w) = - d/dw arg H(e^jw)
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises, a warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
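# Hedged numeric check (added for illustration, not part of scipy): a pure
# two-sample delay, b = [0, 0, 1] and a = [1], has a constant group delay of
# exactly 2 samples at every frequency.
def _example_group_delay_pure_delay():
    w, gd = group_delay(([0.0, 0.0, 1.0], [1.0]))
    return np.allclose(gd, 2.0)  # expected: True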
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
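# Hedged illustration (added, not part of scipy): round-tripping a simple
# second-order transfer function through tf2zpk and zpk2tf recovers the
# original (already normalized) coefficients.
def _example_tf2zpk_roundtrip():
    b, a = [1.0, 0.5], [1.0, -1.2, 0.36]
    z, p, k = tf2zpk(b, a)  # z = [-0.5], p = [0.6, 0.6], k = 1.0
    b2, a2 = zpk2tf(z, p, k)
    return np.allclose(b, b2) and np.allclose(a, a2)  # expected: True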
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
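# Hedged micro-example (added for illustration): from `fro`, pick the index of
# the real (or complex) element that lies closest to `to`.
def _example_nearest_real_complex_idx():
    fro = np.array([1.0, 2.0, 1.0 + 1.0j])
    return _nearest_real_complex_idx(fro, 1.2, 'real')  # -> 0 (the element 1.0)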
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. Although they can operate on analog filters, the results may
be sub-optimal.
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
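    Examples
    --------
    A minimal illustrative sketch (arbitrary coefficients): dividing both
    polynomials by ``a[0]`` makes the denominator monic.
    >>> import numpy as np
    >>> from scipy import signal
    >>> b, a = signal.normalize([2.0, 4.0], [2.0, 0.0, 4.0])
    >>> np.allclose(b, [1, 2]) and np.allclose(a, [1, 0, 2])
    True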
"""
b, a = map(atleast_1d, (b, a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if len(b.shape) == 1:
b = asarray([b], b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(0, outb[:, 0], atol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(0, outb[:, 0], atol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:, 1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
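    Examples
    --------
    A minimal illustrative sketch: moving the cutoff of the first-order
    prototype ``1 / (s + 1)`` to ``wo = 2`` gives ``2 / (s + 2)``.
    >>> import numpy as np
    >>> from scipy import signal
    >>> b, a = signal.lp2lp([1.0], [1.0, 1.0], wo=2.0)
    >>> np.allclose(b, [2]) and np.allclose(a, [1, 2])
    True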
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
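    Examples
    --------
    As an illustrative sketch, applying ``s -> wo / s`` to the first-order
    prototype ``1 / (s + 1)`` with ``wo = 2`` gives ``s / (s + 2)``.
    >>> import numpy as np
    >>> from scipy import signal
    >>> b, a = signal.lp2hp([1.0], [1.0, 1.0], wo=2.0)
    >>> np.allclose(b, [1, 0]) and np.allclose(a, [1, 2])
    True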
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
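    Examples
    --------
    A small worked sketch: the substitution
    ``s -> (s**2 + wo**2) / (s * bw)`` applied to the prototype
    ``1 / (s + 1)`` with ``wo = bw = 1`` gives ``s / (s**2 + s + 1)``.
    >>> import numpy as np
    >>> from scipy import signal
    >>> b, a = signal.lp2bp([1.0], [1.0, 1.0], wo=1.0, bw=1.0)
    >>> np.allclose(b, [1, 0]) and np.allclose(a, [1, 1, 1])
    True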
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
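    Examples
    --------
    Similarly, a small worked sketch: the substitution
    ``s -> (s * bw) / (s**2 + wo**2)`` applied to the prototype
    ``1 / (s + 1)`` with ``wo = bw = 1`` gives
    ``(s**2 + 1) / (s**2 + s + 1)``.
    >>> import numpy as np
    >>> from scipy import signal
    >>> b, a = signal.lp2bs([1.0], [1.0, 1.0], wo=1.0, bw=1.0)
    >>> np.allclose(b, [1, 0, 1]) and np.allclose(a, [1, 1, 1])
    True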
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
    The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
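    Examples
    --------
    A minimal illustrative sketch: discretizing the analog first-order
    low-pass ``1 / (s + 1)`` with ``fs = 1`` gives ``(z + 1) / (3*z - 1)``,
    up to normalization of the leading denominator coefficient.
    >>> import numpy as np
    >>> from scipy import signal
    >>> bz, az = signal.bilinear([1.0], [1.0, 1.0], fs=1.0)
    >>> np.allclose(bz, [1.0 / 3, 1.0 / 3]) and np.allclose(az, [1, -1.0 / 3])
    True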
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
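    Examples
    --------
    An illustrative call using the default elliptic approximation: a digital
    lowpass with the passband edge at 0.2 and the stopband edge at 0.3 (in
    half-cycles / sample), at most 1 dB of passband loss and at least 40 dB
    of stopband attenuation.
    >>> from scipy import signal
    >>> b, a = signal.iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40)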
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
    z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba'):
"""Bessel/Thomson digital and analog filter design.
Design an Nth order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Bessel filter, this is defined as the point at which the
asymptotes of the response are the same as a Butterworth filter of
the same order.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response.
As order increases, the Bessel filter approaches a Gaussian filter.
The digital Bessel filter is generated using the bilinear
transform, which does not preserve the phase response of the analog
filter. As such, it is only approximately correct at frequencies
below about fs/4. To get maximally flat group delay at higher
frequencies, the analog Bessel filter must be transformed using
phase-preserving techniques.
For a given `Wn`, the lowpass and highpass filter have the same phase vs
frequency curves; they are "phase-matched".
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the flat group delay and
the relationship to the Butterworth's cutoff frequency:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.plot(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter frequency response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel')
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
    # Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
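    Examples
    --------
    A small illustrative check: the prototype has no zeros, unit gain, and
    all of its poles on the unit circle in the left half-plane.
    >>> import numpy as np
    >>> from scipy import signal
    >>> z, p, k = signal.buttap(4)
    >>> len(z), k
    (0, 1)
    >>> np.allclose(np.abs(p), 1.0) and bool(np.all(p.real < 0))
    True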
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
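# Editor's sketch (not part of the original SciPy source): a minimal check of
# the Butterworth prototype above, assuming the module-level `numpy` import
# used throughout this file.  `buttap` should return no zeros, unit gain, and
# poles of unit magnitude in the left half of the s-plane.
def _buttap_sanity_sketch():
    z, p, k = buttap(4)
    assert z.size == 0 and k == 1                # no zeros, unit gain
    assert numpy.allclose(numpy.abs(p), 1.0)     # poles on the unit circle
    assert numpy.all(p.real < 0)                 # stable (left half-plane)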
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
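# Editor's sketch (not part of the original SciPy source): the comment in
# `cheb1ap` notes that even-order Chebyshev I filters have a DC gain of
# -rp dB.  A quick numerical check of that property for the general branch,
# assuming the module-level `numpy` import; H(0) = k / prod(-p) because the
# prototype has no zeros.
def _cheb1ap_dc_gain_sketch(N=4, rp=1.0):
    z, p, k = cheb1ap(N, rp)
    dc_gain = abs(k / numpy.prod(-p))
    assert numpy.allclose(20 * numpy.log10(dc_gain), -rp)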
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
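# Editor's sketch (not part of the original SciPy source): the Chebyshev II
# prototype should place its transmission zeros on the imaginary axis and all
# poles in the left half-plane.  A brief check, assuming the module-level
# `numpy` import.
def _cheb2ap_sanity_sketch(N=5, rs=40.0):
    z, p, k = cheb2ap(N, rs)
    assert numpy.allclose(z.real, 0.0)           # zeros purely imaginary
    assert numpy.all(p.real < 0)                 # stable prototype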
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
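# Editor's sketch (not part of the original SciPy source): like the
# Chebyshev II case, the elliptic prototype puts its transmission zeros on the
# imaginary axis and its poles in the left half-plane.  A brief check,
# assuming the module-level `numpy` import.
def _ellipap_sanity_sketch(N=4, rp=1.0, rs=40.0):
    z, p, k = ellipap(N, rp, rs)
    assert numpy.allclose(z.real, 0.0)           # zeros purely imaginary
    assert numpy.all(p.real < 0)                 # stable prototype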
def besselap(N):
"""Return (z,p,k) for analog prototype of an Nth order Bessel filter.
The filter is normalized such that the filter asymptotes are the same as
a Butterworth filter of the same order with an angular (e.g. rad/s)
cutoff frequency of 1.
Parameters
----------
N : int
The order of the Bessel filter to return zeros, poles and gain for.
Values in the range 0-25 are supported.
Returns
-------
z : ndarray
Zeros. Is always an empty array.
p : ndarray
Poles.
k : scalar
Gain. Always 1.
"""
z = []
k = 1
if N == 0:
p = []
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229 + .4999999999999999999999996j,
-.8660254037844386467637229 - .4999999999999999999999996j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907 - .7113666249728352680992154j,
-.7456403858480766441810907 + .7113666249728352680992154j]
elif N == 4:
p = [-.6572111716718829545787781 - .8301614350048733772399715j,
-.6572111716718829545787788 + .8301614350048733772399715j,
-.9047587967882449459642637 - .2709187330038746636700923j,
-.9047587967882449459642624 + .2709187330038746636700926j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677 - .4427174639443327209850002j,
-.8515536193688395541722677 + .4427174639443327209850002j,
-.5905759446119191779319432 - .9072067564574549539291747j,
-.5905759446119191779319432 + .9072067564574549539291747j]
elif N == 6:
p = [-.9093906830472271808050953 - .1856964396793046769246397j,
-.9093906830472271808050953 + .1856964396793046769246397j,
-.7996541858328288520243325 - .5621717346937317988594118j,
-.7996541858328288520243325 + .5621717346937317988594118j,
-.5385526816693109683073792 - .9616876881954277199245657j,
-.5385526816693109683073792 + .9616876881954277199245657j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340 - .3216652762307739398381830j,
-.8800029341523374639772340 + .3216652762307739398381830j,
-.7527355434093214462291616 - .6504696305522550699212995j,
-.7527355434093214462291616 + .6504696305522550699212995j,
-.4966917256672316755024763 - 1.002508508454420401230220j,
-.4966917256672316755024763 + 1.002508508454420401230220j]
elif N == 8:
p = [-.9096831546652910216327629 - .1412437976671422927888150j,
-.9096831546652910216327629 + .1412437976671422927888150j,
-.8473250802359334320103023 - .4259017538272934994996429j,
-.8473250802359334320103023 + .4259017538272934994996429j,
-.7111381808485399250796172 - .7186517314108401705762571j,
-.7111381808485399250796172 + .7186517314108401705762571j,
-.4621740412532122027072175 - 1.034388681126901058116589j,
-.4621740412532122027072175 + 1.034388681126901058116589j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848 - .2526580934582164192308115j,
-.8911217017079759323183848 + .2526580934582164192308115j,
-.8148021112269012975514135 - .5085815689631499483745341j,
-.8148021112269012975514135 + .5085815689631499483745341j,
-.6743622686854761980403401 - .7730546212691183706919682j,
-.6743622686854761980403401 + .7730546212691183706919682j,
-.4331415561553618854685942 - 1.060073670135929666774323j,
-.4331415561553618854685942 + 1.060073670135929666774323j]
elif N == 10:
p = [-.9091347320900502436826431 - .1139583137335511169927714j,
-.9091347320900502436826431 + .1139583137335511169927714j,
-.8688459641284764527921864 - .3430008233766309973110589j,
-.8688459641284764527921864 + .3430008233766309973110589j,
-.7837694413101441082655890 - .5759147538499947070009852j,
-.7837694413101441082655890 + .5759147538499947070009852j,
-.6417513866988316136190854 - .8175836167191017226233947j,
-.6417513866988316136190854 + .8175836167191017226233947j,
-.4083220732868861566219785 - 1.081274842819124562037210j,
-.4083220732868861566219785 + 1.081274842819124562037210j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744 - .2080480375071031919692341j,
-.8963656705721166099815744 + .2080480375071031919692341j,
-.8453044014712962954184557 - .4178696917801248292797448j,
-.8453044014712962954184557 + .4178696917801248292797448j,
-.7546938934722303128102142 - .6319150050721846494520941j,
-.7546938934722303128102142 + .6319150050721846494520941j,
-.6126871554915194054182909 - .8547813893314764631518509j,
-.6126871554915194054182909 + .8547813893314764631518509j,
-.3868149510055090879155425 - 1.099117466763120928733632j,
-.3868149510055090879155425 + 1.099117466763120928733632j]
elif N == 12:
p = [-.9084478234140682638817772 - 95506365213450398415258360.0e-27j,
-.9084478234140682638817772 + 95506365213450398415258360.0e-27j,
-.8802534342016826507901575 - .2871779503524226723615457j,
-.8802534342016826507901575 + .2871779503524226723615457j,
-.8217296939939077285792834 - .4810212115100676440620548j,
-.8217296939939077285792834 + .4810212115100676440620548j,
-.7276681615395159454547013 - .6792961178764694160048987j,
-.7276681615395159454547013 + .6792961178764694160048987j,
-.5866369321861477207528215 - .8863772751320727026622149j,
-.5866369321861477207528215 + .8863772751320727026622149j,
-.3679640085526312839425808 - 1.114373575641546257595657j,
-.3679640085526312839425808 + 1.114373575641546257595657j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718 - .1768342956161043620980863j,
-.8991314665475196220910718 + .1768342956161043620980863j,
-.8625094198260548711573628 - .3547413731172988997754038j,
-.8625094198260548711573628 + .3547413731172988997754038j,
-.7987460692470972510394686 - .5350752120696801938272504j,
-.7987460692470972510394686 + .5350752120696801938272504j,
-.7026234675721275653944062 - .7199611890171304131266374j,
-.7026234675721275653944062 + .7199611890171304131266374j,
-.5631559842430199266325818 - .9135900338325109684927731j,
-.5631559842430199266325818 + .9135900338325109684927731j,
-.3512792323389821669401925 - 1.127591548317705678613239j,
-.3512792323389821669401925 + 1.127591548317705678613239j]
elif N == 14:
p = [-.9077932138396487614720659 - 82196399419401501888968130.0e-27j,
-.9077932138396487614720659 + 82196399419401501888968130.0e-27j,
-.8869506674916445312089167 - .2470079178765333183201435j,
-.8869506674916445312089167 + .2470079178765333183201435j,
-.8441199160909851197897667 - .4131653825102692595237260j,
-.8441199160909851197897667 + .4131653825102692595237260j,
-.7766591387063623897344648 - .5819170677377608590492434j,
-.7766591387063623897344648 + .5819170677377608590492434j,
-.6794256425119233117869491 - .7552857305042033418417492j,
-.6794256425119233117869491 + .7552857305042033418417492j,
-.5418766775112297376541293 - .9373043683516919569183099j,
-.5418766775112297376541293 + .9373043683516919569183099j,
-.3363868224902037330610040 - 1.139172297839859991370924j,
-.3363868224902037330610040 + 1.139172297839859991370924j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918 - .1537681197278439351298882j,
-.9006981694176978324932918 + .1537681197278439351298882j,
-.8731264620834984978337843 - .3082352470564267657715883j,
-.8731264620834984978337843 + .3082352470564267657715883j,
-.8256631452587146506294553 - .4642348752734325631275134j,
-.8256631452587146506294553 + .4642348752734325631275134j,
-.7556027168970728127850416 - .6229396358758267198938604j,
-.7556027168970728127850416 + .6229396358758267198938604j,
-.6579196593110998676999362 - .7862895503722515897065645j,
-.6579196593110998676999362 + .7862895503722515897065645j,
-.5224954069658330616875186 - .9581787261092526478889345j,
-.5224954069658330616875186 + .9581787261092526478889345j,
-.3229963059766444287113517 - 1.149416154583629539665297j,
-.3229963059766444287113517 + 1.149416154583629539665297j]
elif N == 16:
p = [-.9072099595087001356491337 - 72142113041117326028823950.0e-27j,
-.9072099595087001356491337 + 72142113041117326028823950.0e-27j,
-.8911723070323647674780132 - .2167089659900576449410059j,
-.8911723070323647674780132 + .2167089659900576449410059j,
-.8584264231521330481755780 - .3621697271802065647661080j,
-.8584264231521330481755780 + .3621697271802065647661080j,
-.8074790293236003885306146 - .5092933751171800179676218j,
-.8074790293236003885306146 + .5092933751171800179676218j,
-.7356166304713115980927279 - .6591950877860393745845254j,
-.7356166304713115980927279 + .6591950877860393745845254j,
-.6379502514039066715773828 - .8137453537108761895522580j,
-.6379502514039066715773828 + .8137453537108761895522580j,
-.5047606444424766743309967 - .9767137477799090692947061j,
-.5047606444424766743309967 + .9767137477799090692947061j,
-.3108782755645387813283867 - 1.158552841199330479412225j,
-.3108782755645387813283867 + 1.158552841199330479412225j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844 - .1360267995173024591237303j,
-.9016273850787285964692844 + .1360267995173024591237303j,
-.8801100704438627158492165 - .2725347156478803885651973j,
-.8801100704438627158492165 + .2725347156478803885651973j,
-.8433414495836129204455491 - .4100759282910021624185986j,
-.8433414495836129204455491 + .4100759282910021624185986j,
-.7897644147799708220288138 - .5493724405281088674296232j,
-.7897644147799708220288138 + .5493724405281088674296232j,
-.7166893842372349049842743 - .6914936286393609433305754j,
-.7166893842372349049842743 + .6914936286393609433305754j,
-.6193710717342144521602448 - .8382497252826992979368621j,
-.6193710717342144521602448 + .8382497252826992979368621j,
-.4884629337672704194973683 - .9932971956316781632345466j,
-.4884629337672704194973683 + .9932971956316781632345466j,
-.2998489459990082015466971 - 1.166761272925668786676672j,
-.2998489459990082015466971 + 1.166761272925668786676672j]
elif N == 18:
p = [-.9067004324162775554189031 - 64279241063930693839360680.0e-27j,
-.9067004324162775554189031 + 64279241063930693839360680.0e-27j,
-.8939764278132455733032155 - .1930374640894758606940586j,
-.8939764278132455733032155 + .1930374640894758606940586j,
-.8681095503628830078317207 - .3224204925163257604931634j,
-.8681095503628830078317207 + .3224204925163257604931634j,
-.8281885016242836608829018 - .4529385697815916950149364j,
-.8281885016242836608829018 + .4529385697815916950149364j,
-.7726285030739558780127746 - .5852778162086640620016316j,
-.7726285030739558780127746 + .5852778162086640620016316j,
-.6987821445005273020051878 - .7204696509726630531663123j,
-.6987821445005273020051878 + .7204696509726630531663123j,
-.6020482668090644386627299 - .8602708961893664447167418j,
-.6020482668090644386627299 + .8602708961893664447167418j,
-.4734268069916151511140032 - 1.008234300314801077034158j,
-.4734268069916151511140032 + 1.008234300314801077034158j,
-.2897592029880489845789953 - 1.174183010600059128532230j,
-.2897592029880489845789953 + 1.174183010600059128532230j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536 - .1219568381872026517578164j,
-.9021937639390660668922536 + .1219568381872026517578164j,
-.8849290585034385274001112 - .2442590757549818229026280j,
-.8849290585034385274001112 + .2442590757549818229026280j,
-.8555768765618421591093993 - .3672925896399872304734923j,
-.8555768765618421591093993 + .3672925896399872304734923j,
-.8131725551578197705476160 - .4915365035562459055630005j,
-.8131725551578197705476160 + .4915365035562459055630005j,
-.7561260971541629355231897 - .6176483917970178919174173j,
-.7561260971541629355231897 + .6176483917970178919174173j,
-.6818424412912442033411634 - .7466272357947761283262338j,
-.6818424412912442033411634 + .7466272357947761283262338j,
-.5858613321217832644813602 - .8801817131014566284786759j,
-.5858613321217832644813602 + .8801817131014566284786759j,
-.4595043449730988600785456 - 1.021768776912671221830298j,
-.4595043449730988600785456 + 1.021768776912671221830298j,
-.2804866851439370027628724 - 1.180931628453291873626003j,
-.2804866851439370027628724 + 1.180931628453291873626003j]
elif N == 20:
p = [-.9062570115576771146523497 - 57961780277849516990208850.0e-27j,
-.9062570115576771146523497 + 57961780277849516990208850.0e-27j,
-.8959150941925768608568248 - .1740317175918705058595844j,
-.8959150941925768608568248 + .1740317175918705058595844j,
-.8749560316673332850673214 - .2905559296567908031706902j,
-.8749560316673332850673214 + .2905559296567908031706902j,
-.8427907479956670633544106 - .4078917326291934082132821j,
-.8427907479956670633544106 + .4078917326291934082132821j,
-.7984251191290606875799876 - .5264942388817132427317659j,
-.7984251191290606875799876 + .5264942388817132427317659j,
-.7402780309646768991232610 - .6469975237605228320268752j,
-.7402780309646768991232610 + .6469975237605228320268752j,
-.6658120544829934193890626 - .7703721701100763015154510j,
-.6658120544829934193890626 + .7703721701100763015154510j,
-.5707026806915714094398061 - .8982829066468255593407161j,
-.5707026806915714094398061 + .8982829066468255593407161j,
-.4465700698205149555701841 - 1.034097702560842962315411j,
-.4465700698205149555701841 + 1.034097702560842962315411j,
-.2719299580251652601727704 - 1.187099379810885886139638j,
-.2719299580251652601727704 + 1.187099379810885886139638j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083 - .1105252572789856480992275j,
-.9025428073192696303995083 + .1105252572789856480992275j,
-.8883808106664449854431605 - .2213069215084350419975358j,
-.8883808106664449854431605 + .2213069215084350419975358j,
-.8643915813643204553970169 - .3326258512522187083009453j,
-.8643915813643204553970169 + .3326258512522187083009453j,
-.8299435470674444100273463 - .4448177739407956609694059j,
-.8299435470674444100273463 + .4448177739407956609694059j,
-.7840287980408341576100581 - .5583186348022854707564856j,
-.7840287980408341576100581 + .5583186348022854707564856j,
-.7250839687106612822281339 - .6737426063024382240549898j,
-.7250839687106612822281339 + .6737426063024382240549898j,
-.6506315378609463397807996 - .7920349342629491368548074j,
-.6506315378609463397807996 + .7920349342629491368548074j,
-.5564766488918562465935297 - .9148198405846724121600860j,
-.5564766488918562465935297 + .9148198405846724121600860j,
-.4345168906815271799687308 - 1.045382255856986531461592j,
-.4345168906815271799687308 + 1.045382255856986531461592j,
-.2640041595834031147954813 - 1.192762031948052470183960j,
-.2640041595834031147954813 + 1.192762031948052470183960j]
elif N == 22:
p = [-.9058702269930872551848625 - 52774908289999045189007100.0e-27j,
-.9058702269930872551848625 + 52774908289999045189007100.0e-27j,
-.8972983138153530955952835 - .1584351912289865608659759j,
-.8972983138153530955952835 + .1584351912289865608659759j,
-.8799661455640176154025352 - .2644363039201535049656450j,
-.8799661455640176154025352 + .2644363039201535049656450j,
-.8534754036851687233084587 - .3710389319482319823405321j,
-.8534754036851687233084587 + .3710389319482319823405321j,
-.8171682088462720394344996 - .4785619492202780899653575j,
-.8171682088462720394344996 + .4785619492202780899653575j,
-.7700332930556816872932937 - .5874255426351153211965601j,
-.7700332930556816872932937 + .5874255426351153211965601j,
-.7105305456418785989070935 - .6982266265924524000098548j,
-.7105305456418785989070935 + .6982266265924524000098548j,
-.6362427683267827226840153 - .8118875040246347267248508j,
-.6362427683267827226840153 + .8118875040246347267248508j,
-.5430983056306302779658129 - .9299947824439872998916657j,
-.5430983056306302779658129 + .9299947824439872998916657j,
-.4232528745642628461715044 - 1.055755605227545931204656j,
-.4232528745642628461715044 + 1.055755605227545931204656j,
-.2566376987939318038016012 - 1.197982433555213008346532j,
-.2566376987939318038016012 + 1.197982433555213008346532j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993 - .1010534335314045013252480j,
-.9027564979912504609412993 + .1010534335314045013252480j,
-.8909283242471251458653994 - .2023024699381223418195228j,
-.8909283242471251458653994 + .2023024699381223418195228j,
-.8709469395587416239596874 - .3039581993950041588888925j,
-.8709469395587416239596874 + .3039581993950041588888925j,
-.8423805948021127057054288 - .4062657948237602726779246j,
-.8423805948021127057054288 + .4062657948237602726779246j,
-.8045561642053176205623187 - .5095305912227258268309528j,
-.8045561642053176205623187 + .5095305912227258268309528j,
-.7564660146829880581478138 - .6141594859476032127216463j,
-.7564660146829880581478138 + .6141594859476032127216463j,
-.6965966033912705387505040 - .7207341374753046970247055j,
-.6965966033912705387505040 + .7207341374753046970247055j,
-.6225903228771341778273152 - .8301558302812980678845563j,
-.6225903228771341778273152 + .8301558302812980678845563j,
-.5304922463810191698502226 - .9439760364018300083750242j,
-.5304922463810191698502226 + .9439760364018300083750242j,
-.4126986617510148836149955 - 1.065328794475513585531053j,
-.4126986617510148836149955 + 1.065328794475513585531053j,
-.2497697202208956030229911 - 1.202813187870697831365338j,
-.2497697202208956030229911 + 1.202813187870697831365338j]
elif N == 24:
p = [-.9055312363372773709269407 - 48440066540478700874836350.0e-27j,
-.9055312363372773709269407 + 48440066540478700874836350.0e-27j,
-.8983105104397872954053307 - .1454056133873610120105857j,
-.8983105104397872954053307 + .1454056133873610120105857j,
-.8837358034555706623131950 - .2426335234401383076544239j,
-.8837358034555706623131950 + .2426335234401383076544239j,
-.8615278304016353651120610 - .3403202112618624773397257j,
-.8615278304016353651120610 + .3403202112618624773397257j,
-.8312326466813240652679563 - .4386985933597305434577492j,
-.8312326466813240652679563 + .4386985933597305434577492j,
-.7921695462343492518845446 - .5380628490968016700338001j,
-.7921695462343492518845446 + .5380628490968016700338001j,
-.7433392285088529449175873 - .6388084216222567930378296j,
-.7433392285088529449175873 + .6388084216222567930378296j,
-.6832565803536521302816011 - .7415032695091650806797753j,
-.6832565803536521302816011 + .7415032695091650806797753j,
-.6096221567378335562589532 - .8470292433077202380020454j,
-.6096221567378335562589532 + .8470292433077202380020454j,
-.5185914574820317343536707 - .9569048385259054576937721j,
-.5185914574820317343536707 + .9569048385259054576937721j,
-.4027853855197518014786978 - 1.074195196518674765143729j,
-.4027853855197518014786978 + 1.074195196518674765143729j,
-.2433481337524869675825448 - 1.207298683731972524975429j,
-.2433481337524869675825448 + 1.207298683731972524975429j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561 - 93077131185102967450643820.0e-27j,
-.9028833390228020537142561 + 93077131185102967450643820.0e-27j,
-.8928551459883548836774529 - .1863068969804300712287138j,
-.8928551459883548836774529 + .1863068969804300712287138j,
-.8759497989677857803656239 - .2798521321771408719327250j,
-.8759497989677857803656239 + .2798521321771408719327250j,
-.8518616886554019782346493 - .3738977875907595009446142j,
-.8518616886554019782346493 + .3738977875907595009446142j,
-.8201226043936880253962552 - .4686668574656966589020580j,
-.8201226043936880253962552 + .4686668574656966589020580j,
-.7800496278186497225905443 - .5644441210349710332887354j,
-.7800496278186497225905443 + .5644441210349710332887354j,
-.7306549271849967721596735 - .6616149647357748681460822j,
-.7306549271849967721596735 + .6616149647357748681460822j,
-.6704827128029559528610523 - .7607348858167839877987008j,
-.6704827128029559528610523 + .7607348858167839877987008j,
-.5972898661335557242320528 - .8626676330388028512598538j,
-.5972898661335557242320528 + .8626676330388028512598538j,
-.5073362861078468845461362 - .9689006305344868494672405j,
-.5073362861078468845461362 + .9689006305344868494672405j,
-.3934529878191079606023847 - 1.082433927173831581956863j,
-.3934529878191079606023847 + 1.082433927173831581956863j,
-.2373280669322028974199184 - 1.211476658382565356579418j,
-.2373280669322028974199184 + 1.211476658382565356579418j]
else:
raise ValueError("Bessel Filter not supported for order %s" % N)
return asarray(z), asarray(p), k
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
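# Editor's sketch (not part of the original SciPy source): the two tables
# above are plain alias maps, letting higher-level design helpers resolve a
# user-supplied string to a prototype/order-estimator pair and a canonical
# band type.  For example:
def _filter_alias_sketch():
    assert filter_dict['cheby1'][0] is cheb1ap   # prototype for 'cheby1'
    assert filter_dict['ellip'][1] is ellipord   # order estimator for 'ellip'
    assert band_dict['bp'] == 'bandpass'         # short band aliases
    assert band_dict['stop'] == 'bandstop'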
|
bsd-3-clause
|
lukauskas/scipy
|
scipy/stats/stats.py
|
18
|
169352
|
# Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import find_repeats, linregress, theilslopes
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be either 'propagate', 'raise', or "
"'ignore'")
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
        # If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
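# Editor's sketch (not part of the original SciPy source): `_contains_nan`
# only reports whether NaNs are present and which policy to apply; it never
# modifies the data.  A minimal illustration, assuming the module-level `np`
# import.
def _contains_nan_sketch():
    flag, policy = _contains_nan(np.array([0.5, np.nan]))
    assert bool(flag) and policy == 'propagate'
    flag, policy = _contains_nan(np.array([0.5, 1.5]), nan_policy='omit')
    assert not bool(flag) and policy == 'omit'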
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
"in favour of numpy.nanmean.")
def nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int or None, optional
Axis along which the mean is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
factor = 1.0 - np.sum(mask, axis) / Norig
x[mask] = 0.0
return np.mean(x, axis) / factor
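# Editor's sketch (not part of the original SciPy source): the implementation
# above zeroes the NaN entries and rescales the ordinary mean by the fraction
# of valid entries, which is algebraically the mean of the non-NaN values.  A
# small worked check, assuming the module-level `np` import.
def _nanmean_sketch():
    a = np.array([1.0, 2.0, np.nan, 4.0])
    assert np.allclose(nanmean(a), 7.0 / 3.0)    # mean of the three valid values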
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
"in favour of numpy.nanstd.\nNote that numpy.nanstd "
"has a different signature.")
def nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
Nnan = np.sum(mask, axis) * 1.0
n = Norig - Nnan
x[mask] = 0.0
m1 = np.sum(x, axis) / n
if axis:
d = x - np.expand_dims(m1, axis)
else:
d = x - m1
d *= d
m2 = np.sum(d, axis) - m1 * m1 * Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.0)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
    Returns
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
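# Editor's sketch (not part of the original SciPy source): `_nanmedian`
# compacts the non-NaN values to the front of a copy and takes the median of
# that slice.  A small check on a rank-1 array, assuming the module-level
# `np` import.
def _nanmedian_sketch():
    a = np.array([1.0, np.nan, 3.0, 2.0])
    assert np.allclose(_nanmedian(a), 2.0)       # median of [1, 3, 2]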
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
"in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the median is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean, numpy.nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
if hasattr(np, 'nanmedian'): # numpy 1.9 faster for some cases
return np.nanmedian(x, axis)
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
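# Editor's sketch (not part of the original SciPy source): the geometric mean
# is computed as exp(mean(log(a))), so for powers of a common base the result
# is exact.  A small check, assuming the module-level `np` import.
def _gmean_sketch():
    a = [1.0, 4.0, 16.0]
    assert np.allclose(gmean(a), 4.0)                         # cube root of 64
    assert np.allclose(gmean(a), np.exp(np.mean(np.log(a))))  # log identity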
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
    If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
return ModeResult(mostfrequent, oldcounts)
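# Editor's sketch (not part of the original SciPy source): because the unique
# values are scanned in ascending order and ties do not replace the current
# winner, the smallest of several equally common values is reported.
def _mode_tie_sketch():
    result = mode([3, 1, 1, 3])
    assert int(result.mode[0]) == 1 and int(result.count[0]) == 2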
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
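# Editor's sketch (not part of the original SciPy source): `_mask_to_limits`
# masks everything outside (lower, upper), with the `inclusive` flags deciding
# whether the limit values themselves survive.  A short illustration, assuming
# the module-level `np` import.
def _mask_to_limits_sketch():
    am = _mask_to_limits(np.arange(5), (1, 3), (True, True))
    assert list(am.compressed()) == [1, 2, 3]    # limits kept
    am = _mask_to_limits(np.arange(5), (1, 3), (False, False))
    assert list(am.compressed()) == [2]          # limits masked out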
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
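# Editor's sketch (not part of the original SciPy source): with no limits the
# trimmed standard error reduces to std(ddof=1) / sqrt(n), so it is consistent
# with `tstd` above.  A quick check, assuming the module-level `np` import.
def _tsem_consistency_sketch():
    x = np.arange(20.0)
    assert np.allclose(tsem(x), tstd(x) / np.sqrt(x.size))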
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
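# Editor's sketch (not part of the original SciPy source): the exponentiation-
# by-squares path above must agree with the textbook definition
# m_k = mean((x - mean(x))**k).  A direct comparison, assuming the
# module-level `np` import.
def _moment_definition_sketch():
    x = np.arange(6.0)
    for k in (2, 3, 4, 5):
        assert np.allclose(moment(x, k), np.mean((x - x.mean()) ** k))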
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
return a.std(axis) / a.mean(axis)
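# Editor's sketch (not part of the original SciPy source): `variation` is the
# population (ddof=0) standard deviation divided by the mean along the chosen
# axis.  A direct comparison, assuming the module-level `np` import.
def _variation_sketch():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    assert np.allclose(variation(x), np.std(x) / np.mean(x))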
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
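# Editor's sketch (not part of the original SciPy source): with the default
# bias=True, the skewness above is g1 = m3 / m2**1.5 built from the central
# moments computed by `moment` earlier in this module.  A direct comparison,
# assuming the module-level `np` import.
def _skew_moment_sketch():
    x = np.array([1.0, 2.0, 2.0, 3.0, 9.0])
    assert np.allclose(skew(x), moment(x, 3) / moment(x, 2) ** 1.5)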
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.
    Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
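# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): the Fisher and Pearson definitions differ by the constant 3.
def _kurtosis_example():
    data = np.arange(1.0, 6.0)              # 1, 2, 3, 4, 5
    fisher = kurtosis(data)                 # about -1.3
    pearson = kurtosis(data, fisher=False)  # about 1.7
    return fisher, pearson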
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
# Return namedtuple for clarity
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if contains_nan and nan_policy == 'propagate':
res = np.zeros(6) * np.nan
return DescribeResult(*res)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
"""
a, axis = _chk_asarray(a, axis)
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if contains_nan and nan_policy == 'propagate':
return SkewtestResult(np.nan, np.nan)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
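# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): skewtest needs at least 8 observations and returns a
# (statistic, pvalue) namedtuple.
def _skewtest_example():
    rng = np.random.RandomState(12345)
    sample = rng.normal(size=30)
    result = skewtest(sample)
    return result.statistic, result.pvalue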
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
"""
a, axis = _chk_asarray(a, axis)
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
if contains_nan and nan_policy == 'propagate':
return KurtosistestResult(np.nan, np.nan)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E) / np.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
    # Guard against non-positive denominators: mark them with a mask so their
    # Z-score can be forced to 0 below ("bad entries" in the docstring Notes).
    bad = denom < 0
    denom = np.where(bad, 99, denom)
    term2 = np.where(bad, term1, np.power((1-2.0/A)/denom, 1/3.0))
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
    Z = np.where(bad, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
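# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): the test is only recommended for n >= 20 (a warning is emitted
# for smaller samples, and fewer than 5 observations raise a ValueError).
def _kurtosistest_example():
    rng = np.random.RandomState(0)
    sample = rng.normal(size=100)
    stat, pval = kurtosistest(sample)
    return stat, pval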
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size," Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
departures from normality," Biometrika, 60, 613-622
"""
a, axis = _chk_asarray(a, axis)
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
if contains_nan and nan_policy == 'propagate':
return NormaltestResult(np.nan, np.nan)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
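# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): normaltest combines the skewtest and kurtosistest z-scores into
# k2 = s**2 + k**2 and compares k2 against a chi-squared(2) distribution.
def _normaltest_example():
    rng = np.random.RandomState(42)
    gaussian = rng.normal(size=1000)
    uniform = rng.uniform(size=1000)
    # The Gaussian sample typically yields a large p-value; the uniform
    # sample is typically rejected with a very small p-value.
    return normaltest(gaussian).pvalue, normaltest(uniform).pvalue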
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
           Economics Letters 6, 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides. And it's significantly faster.
Therefore it's recommended to use `numpy.percentile` for users that have
numpy >= 1.9.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
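# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): for np.arange(10) the 25th percentile falls at fractional index
# 2.25, so the three interpolation methods give different scores.
def _scoreatpercentile_example():
    data = np.arange(10)
    frac = scoreatpercentile(data, 25)                                 # 2.25
    low = scoreatpercentile(data, 25, interpolation_method='lower')    # 2
    high = scoreatpercentile(data, 25, interpolation_method='higher')  # 3
    return frac, low, high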
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
        idx = (a == score)  # boolean mask (a plain array, not a list)
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] where x = 0, ..., N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit',
'binsize', 'extrapoints'))
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
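# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): with no limits given, the bins extend slightly past the data
# range, here to (-0.5, 5.5) with two bins of width 3.0.
def _histogram_example():
    res = histogram([1, 2, 3, 4], numbins=2)
    # count=[2., 2.], lowerlimit=-0.5, binsize=3.0, extrapoints=0
    return res.count, res.lowerlimit, res.binsize, res.extrapoints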
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit',
'binsize', 'extrapoints'))
return CumfreqResult(cumhist, l, b, e)
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit',
'binsize', 'extrapoints'))
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
# If the arrays are not all the same shape, calling np.array(arrays)
# creates a 1-D array with dtype `object` in numpy 1.6+. In numpy
# 1.5.x, it raises an exception. To work around this, we explicitly
# set the dtype to `object` when the arrays are not all the same shape.
if len(arrays) < 2 or all(x.shape == arrays[0].shape for x in arrays[1:]):
dt = None
else:
dt = object
return np.array(arrays, dtype=dt)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
    The default value for `ddof` is different from the default (0) used by
    other routines that accept `ddof`, such as np.std and stats.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the sample
mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of input
array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, 0.1954,
... 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``)
to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to zero
mean and unit variance, where mean and variance are calculated from the
comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
    The output array ``c`` contains only those elements of the input array `a`
    that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value use for clipping.
upper : float
Upper threshold value use for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower',
'upper'))
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
    conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs
    else:
        raise ValueError("tail must be 'left' or 'right'")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
return atmp[lowercut:uppercut]
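# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): trimming 20% from one tail of 10 values drops the 2 largest
# (tail='right') or the 2 smallest (tail='left'); the returned order is
# undefined, so the results are sorted here for display.
def _trim1_example():
    data = np.arange(10)
    kept_right = np.sort(trim1(data, 0.2))              # 0 .. 7
    kept_left = np.sort(trim1(data, 0.2, tail='left'))  # 2 .. 9
    return kept_right, kept_left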
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.1210194716424473, pvalue=0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariance
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does
y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
(Pearson's correlation coefficient,
2-tailed p-value)
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
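# Minimal usage sketch (illustrative only; hypothetical helper, not part of the
# public API): perfectly linear data give r = +/-1.0 with a p-value of 0.0.
def _pearsonr_example():
    x = [1.0, 2.0, 3.0, 4.0]
    r_pos, p_pos = pearsonr(x, [2.0, 4.0, 6.0, 8.0])  # ( 1.0, 0.0)
    r_neg, p_neg = pearsonr(x, [4.0, 3.0, 2.0, 1.0])  # (-1.0, 0.0)
    return (r_pos, p_pos), (r_neg, p_neg)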
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
        This is the prior odds ratio and not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
                Atlantic  Indian
        whales     8        2
        sharks     1        5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). The correlation matrix is square
        with length equal to the total number of variables (columns or rows)
        in `a` and `b` combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated, has same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
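# Illustrative cross-check (not part of the original module): Spearman's rho
# is Pearson's r computed on the ranks of the data, so ranking first and
# calling `pearsonr` reproduces `spearmanr`.  Variable names below are
# hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.array([1., 2., 3., 4., 5.])
# >>> y = np.array([5., 6., 7., 8., 7.])
# >>> rho, p = stats.spearmanr(x, y)
# >>> rho_on_ranks, _ = stats.pearsonr(stats.rankdata(x), stats.rankdata(y))
# >>> np.allclose(rho, rho_on_ranks)
# True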
def pointbiserialr(x, y):
"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} - \overline{Y_{0}}}{s_{y}}
         \sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
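# Illustrative cross-check (not part of the original module): as the body of
# `pointbiserialr` shows, it simply delegates to `pearsonr`, so both calls
# agree on a 0/1 coded variable.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> coded = np.array([0, 0, 0, 1, 1, 1, 1])
# >>> metric = np.arange(7, dtype=float)
# >>> r_pb, p_pb = stats.pointbiserialr(coded, metric)
# >>> r_p, p_p = stats.pearsonr(coded, metric)
# >>> np.allclose([r_pb, p_pb], [r_p, p_p])
# True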
def kendalltau(x, y, initial_lexsort=True, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Whether to use lexsort or quicksort as the sorting method for the
initial sort of the inputs. Default is lexsort (True), for which
`kendalltau` is of complexity O(n log(n)). If False, the complexity is
O(n^2), but with a smaller pre-factor (so quicksort may be faster for
small arrays).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
    nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
n = np.int64(len(x))
temp = list(range(n)) # support structure used by mergesort
# this closure recursively sorts sections of perm[] by comparing
# elements of y[perm[]] using temp[] as support
# returns the number of swaps required by an equivalent bubble sort
def mergesort(offs, length):
exchcnt = 0
if length == 1:
return 0
if length == 2:
if y[perm[offs]] <= y[perm[offs+1]]:
return 0
t = perm[offs]
perm[offs] = perm[offs+1]
perm[offs+1] = t
return 1
length0 = length // 2
length1 = length - length0
middle = offs + length0
exchcnt += mergesort(offs, length0)
exchcnt += mergesort(middle, length1)
if y[perm[middle - 1]] < y[perm[middle]]:
return exchcnt
# merging
i = j = k = 0
while j < length0 or k < length1:
if k >= length1 or (j < length0 and y[perm[offs + j]] <=
y[perm[middle + k]]):
temp[i] = perm[offs + j]
d = i - j
j += 1
else:
temp[i] = perm[middle + k]
d = (offs + i) - (middle + k)
k += 1
if d > 0:
exchcnt += d
i += 1
perm[offs:offs+length] = temp[0:length]
return exchcnt
# initial sort on values of x and, if tied, on values of y
if initial_lexsort:
# sort implemented as mergesort, worst case: O(n log(n))
perm = np.lexsort((y, x))
else:
# sort implemented as quicksort, 30% faster but with worst case: O(n^2)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
# compute joint ties
first = 0
t = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
t += ((i - first) * (i - first - 1)) // 2
first = i
t += ((n - first) * (n - first - 1)) // 2
# compute ties in x
first = 0
u = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]]:
u += ((i - first) * (i - first - 1)) // 2
first = i
u += ((n - first) * (n - first - 1)) // 2
# count exchanges
exchanges = mergesort(0, n)
# compute ties in y after mergesort with counting
first = 0
v = 0
for i in xrange(1, n):
if y[perm[first]] != y[perm[i]]:
v += ((i - first) * (i - first - 1)) // 2
first = i
v += ((n - first) * (n - first - 1)) // 2
tot = (n * (n - 1)) // 2
if tot == u or tot == v:
# Special case for all ties in both ranks
return KendalltauResult(np.nan, np.nan)
# Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
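# Illustrative cross-check (not part of the original module): for tie-free
# data tau-b reduces to (concordant - discordant) / (n * (n - 1) / 2), which
# a brute-force O(n^2) pair count reproduces.  Variable names below are
# hypothetical.
#
# >>> import itertools
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.array([1, 2, 3, 4, 5])
# >>> y = np.array([3, 1, 2, 5, 4])
# >>> tau, p = stats.kendalltau(x, y)
# >>> pairs = list(itertools.combinations(range(len(x)), 2))
# >>> signs = [np.sign((x[j] - x[i]) * (y[j] - y[i])) for i, j in pairs]
# >>> np.allclose(tau, sum(signs) / float(len(pairs)))
# True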
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
expected value in null hypothesis, if array_like than it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
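# Illustrative cross-check (not part of the original module): the one-sample
# t statistic is (mean - popmean) / (s / sqrt(n)) with the sample standard
# deviation taken with ddof=1, matching the computation above.  Variable
# names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> sample = np.array([2.1, 2.5, 2.8, 3.0, 3.2, 3.9])
# >>> t, p = stats.ttest_1samp(sample, 3.0)
# >>> t_manual = ((sample.mean() - 3.0) /
# ...             (sample.std(ddof=1) / np.sqrt(sample.size)))
# >>> np.allclose(t, t_manual)
# True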
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
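# Illustrative cross-check (not part of the original module): feeding the
# summary statistics of two samples to `ttest_ind_from_stats` reproduces
# `ttest_ind` on the raw data.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> a = np.array([1.2, 2.4, 1.9, 3.1, 2.7])
# >>> b = np.array([2.8, 3.5, 3.1, 4.0])
# >>> t_raw, p_raw = stats.ttest_ind(a, b)
# >>> t_sum, p_sum = stats.ttest_ind_from_stats(a.mean(), a.std(ddof=1), a.size,
# ...                                           b.mean(), b.std(ddof=1), b.size)
# >>> np.allclose([t_raw, p_raw], [t_sum, p_sum])
# True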
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
    nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
Examples for the use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
    nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
    a = ma.masked_invalid(a)
    b = ma.masked_invalid(b)
    return mstats_basic.ttest_rel(a, b, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
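# Illustrative cross-check (not part of the original module): a paired t-test
# is a one-sample t-test on the within-pair differences against zero, as the
# implementation above makes explicit.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> before = np.array([10.1, 9.8, 11.2, 10.5, 9.9])
# >>> after = np.array([10.6, 10.1, 11.0, 11.2, 10.4])
# >>> t_rel, p_rel = stats.ttest_rel(before, after)
# >>> t_diff, p_diff = stats.ttest_1samp(before - after, 0.0)
# >>> np.allclose([t_rel, p_rel], [t_diff, p_diff])
# True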
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D,
distributions.kstwobign.sf(D * np.sqrt(N)))
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
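# Illustrative usage sketch (not part of the original module): `cdf` may be
# given either as a distribution name or as a callable, and both forms give
# the same result.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.random.seed(0)
# >>> sample = stats.norm.rvs(size=50)
# >>> d_name, p_name = stats.kstest(sample, 'norm')
# >>> d_call, p_call = stats.kstest(sample, stats.norm.cdf)
# >>> np.allclose([d_name, p_name], [d_call, p_call])
# True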
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic',
'pvalue'))
return Power_divergenceResult(stat, p)
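# Illustrative cross-check (not part of the original module): with
# lambda_='log-likelihood' the statistic is the G statistic
# 2 * sum(obs * log(obs / exp)), computed here by hand against the default
# uniform expected frequencies.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy.stats import power_divergence
# >>> obs = np.array([16., 18., 16., 14., 12., 12.])
# >>> exp = np.full_like(obs, obs.mean())
# >>> stat, p = power_divergence(obs, lambda_='log-likelihood')
# >>> np.allclose(stat, 2.0 * np.sum(obs * np.log(obs / exp)))
# True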
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi square test.
The chi square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
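# Illustrative cross-check (not part of the original module): `chisquare` is
# `power_divergence` with the Pearson statistic sum((obs - exp)**2 / exp).
# Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy.stats import chisquare
# >>> obs = np.array([16., 18., 16., 14., 12., 12.])
# >>> exp = np.array([16., 16., 16., 16., 16., 8.])
# >>> stat, p = chisquare(obs, f_exp=exp)
# >>> np.allclose(stat, np.sum((obs - exp) ** 2 / exp))
# True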
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d is the absolute, not signed, distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
except Exception:
prob = 1.0
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
return Ks_2sampResult(d, prob)
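# Illustrative sanity check (not part of the original module): two identical
# samples have coinciding empirical CDFs, so the KS statistic is 0 and the
# p-value is 1.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> sample = np.array([0.3, 1.1, 2.4, 2.9, 3.7])
# >>> d, p = stats.ks_2samp(sample, sample)
# >>> np.allclose([d, p], [0.0, 1.0])
# True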
def mannwhitneyu(x, y, use_continuity=True, alternative='two-sided'):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
Returns
-------
statistic : float
The Mann-Whitney U statistic.
pvalue : float
The p-value assuming an asymptotic normal distribution; one-sided if
`alternative` is 'less' or 'greater', two-sided otherwise.
Notes
-----
Use only when the number of observation in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
The reported p-value is two-sided when `alternative` is 'two-sided' (the
default) and one-sided when `alternative` is 'less' or 'greater'.
"""
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
fact2 = 1
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative == 'less':
z = u1 - meanrank
elif alternative == 'greater':
z = u2 - meanrank
elif alternative == 'two-sided':
bigu = max(u1, u2)
z = np.abs(bigu - meanrank)
fact2 = 2.
else:
raise ValueError("alternative should be 'less', 'greater' "
"or 'two-sided'")
z = z / sd
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
return MannwhitneyuResult(u2, distributions.norm.sf(z) * fact2)
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
return RanksumsResult(z, prob)
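# Illustrative cross-check (not part of the original module): the rank-sum z
# statistic compares the observed rank sum of the first sample with its null
# expectation, scaled by the null standard deviation, exactly as computed
# above.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.array([1.8, 2.6, 3.1, 4.0])
# >>> y = np.array([2.2, 2.9, 3.5, 4.4, 5.0])
# >>> z, p = stats.ranksums(x, y)
# >>> n1, n2 = len(x), len(y)
# >>> s = stats.rankdata(np.concatenate((x, y)))[:n1].sum()
# >>> z_manual = (s - n1 * (n1 + n2 + 1) / 2.0) / np.sqrt(
# ...     n1 * n2 * (n1 + n2 + 1) / 12.0)
# >>> np.allclose(z, z_manual)
# True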
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or 'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
    args = [ma.masked_invalid(a) for a in args]
    return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
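# Illustrative cross-check (not part of the original module): for tie-free
# data the Kruskal-Wallis statistic is
# H = 12 / (N * (N + 1)) * sum(R_i**2 / n_i) - 3 * (N + 1), where R_i is the
# rank sum of group i.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> g1 = np.array([1., 3., 5., 7., 9.])
# >>> g2 = np.array([2., 4., 6., 8., 10.])
# >>> h, p = stats.kruskal(g1, g2)
# >>> ranks = stats.rankdata(np.concatenate((g1, g2)))
# >>> r1, r2 = ranks[:g1.size].sum(), ranks[g1.size:].sum()
# >>> n = g1.size + g2.size
# >>> h_manual = (12.0 / (n * (n + 1)) *
# ...             (r1**2 / g1.size + r2**2 / g2.size) - 3 * (n + 1))
# >>> np.allclose(h, h_manual)
# True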
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
raise ValueError(
    "Invalid method '%s'. Options are 'fisher' or 'stouffer'." % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
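# Illustrative cross-check for `combine_pvalues` above (not part of the
# original module): Fisher's method combines k independent p-values via
# X^2 = -2 * sum(log(p_i)), which under the null is chi-squared distributed
# with 2k degrees of freedom.  Variable names below are hypothetical.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> pvals = np.array([0.02, 0.30, 0.11, 0.45])
# >>> x2, p_comb = stats.combine_pvalues(pvals, method='fisher')
# >>> x2_manual = -2.0 * np.sum(np.log(pvals))
# >>> np.allclose([x2, p_comb],
# ...             [x2_manual, stats.chi2.sf(x2_manual, 2 * len(pvals))])
# True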
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0 .
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
Degrees of freedom of the Restricted model.
dfden : int
Degrees of freedom associated with the Restricted model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
sorted_array : ndarray
A sorted copy of `a`.
indices : ndarray of int
The indices that sort the original array (as returned by ``np.argsort``).
"""
it = np.argsort(a)
as_ = a[it]
return as_, it
|
bsd-3-clause
|
errantlinguist/tangrams-analysis
|
write_mean_rounds_table.py
|
1
|
2577
|
#!/usr/bin/env python3
"""
Writes mean round count data in tabular format.
"""
__author__ = "Todd Shore <[email protected]>"
__copyright__ = "Copyright 2018 Todd Shore"
__license__ = "Apache License, Version 2.0"
import argparse
import csv
import statistics
import sys
from typing import Iterable, Sequence
import pandas as pd
from tangrams_analysis import game_utterances
from tangrams_analysis import session_data as sd
class RoundCounter(object):
@staticmethod
def __count_utts(session_datum: sd.SessionData,
session_round_utt_df_factory: game_utterances.SessionGameRoundUtteranceSequenceFactory) -> pd.Series:
round_utt_df = session_round_utt_df_factory(session_datum)
# Just use the "true" referent rows for extra safety in case something weird happens with the counts otherwise
round_utt_df = round_utt_df.loc[round_utt_df["REFERENT"] == True]
utt_counts = round_utt_df[session_round_utt_df_factory.UTTERANCE_SEQUENCE_COL_NAME].transform(
lambda utts: len(utts))
return utt_counts
def __init__(self):
pass
def __call__(self, session_data: Iterable[sd.SessionData]) -> Sequence[int]:
result = []
session_round_utt_df_factory = game_utterances.SessionGameRoundUtteranceSequenceFactory()
for session_datum in session_data:
round_utt_df = session_round_utt_df_factory(session_datum)
round_count = round_utt_df[game_utterances.EventColumn.ROUND_ID.value].nunique()
result.append(round_count)
return result
def __create_argparser() -> argparse.ArgumentParser:
result = argparse.ArgumentParser(
description="Writes mean round count data in tabular format.")
result.add_argument("inpaths", metavar="INPATH", nargs='+',
help="The paths to search for session data.")
return result
def __main(args):
inpaths = args.inpaths
print("Looking for session data underneath {}.".format(inpaths), file=sys.stderr)
infile_session_data = sd.walk_session_data(inpaths)
counter = RoundCounter()
counts = counter((session_datum for (_, session_datum) in infile_session_data))
# https://pythonconquerstheuniverse.wordpress.com/2011/05/08/newline-conversion-in-python-3/
writer = csv.writer(sys.stdout, dialect=csv.excel_tab, lineterminator="\n")
writer.writerow(("DESC", "VALUE"))
writer.writerow(("Total dialogues", len(counts)))
writer.writerow(("Mean rounds per dialogue", statistics.mean(counts)))
writer.writerow(("Median rounds per dialogue", statistics.median(counts)))
writer.writerow(("Stdev", statistics.stdev(counts)))
if __name__ == "__main__":
__main(__create_argparser().parse_args())
|
apache-2.0
|
xzmagic/code-for-blog
|
2009/qt_mpl_bars.py
|
19
|
7196
|
"""
This demo demonstrates how to embed a matplotlib (mpl) plot
into a PyQt4 GUI application, including:
* Using the navigation toolbar
* Adding data to the plot
* Dynamically modifying the plot's properties
* Processing mpl events
* Saving the plot to a file from a menu
The main goal is to serve as a basis for developing rich PyQt GUI
applications featuring mpl plots (using the mpl OO API).
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 19.01.2009
"""
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.textbox.setText('1 2 3 4')
self.on_draw()
def save_plot(self):
file_choices = "PNG (*.png)|*.png"
path = unicode(QFileDialog.getSaveFileName(self,
'Save file', '',
file_choices))
if path:
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Saved to %s' % path, 2000)
def on_about(self):
msg = """ A demo of using PyQt with matplotlib:
* Use the matplotlib navigation bar
* Add values to the text box and press Enter (or click "Draw")
* Show or hide the grid
* Drag the slider to modify the width of the bars
* Save the plot to a file using the File menu
* Click on a bar to receive an informative message
"""
QMessageBox.about(self, "About the demo", msg.strip())
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
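        # Note (illustrative addition, not in the original demo): besides
        # `artist`, the PickEvent also carries the originating `mouseevent`,
        # from which data coordinates and the mouse button can be read.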
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a bar with coords:\n %s" % box_points
QMessageBox.information(self, "Click!", msg)
def on_draw(self):
""" Redraws the figure
"""
        text = unicode(self.textbox.text())
        self.data = map(int, text.split())
x = range(len(self.data))
# clear the axes and redraw the plot anew
#
self.axes.clear()
self.axes.grid(self.grid_cb.isChecked())
self.axes.bar(
left=x,
height=self.data,
width=self.slider.value() / 100.0,
align='center',
alpha=0.44,
picker=5)
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# Other GUI controls
#
self.textbox = QLineEdit()
self.textbox.setMinimumWidth(200)
        self.connect(self.textbox, SIGNAL('editingFinished()'), self.on_draw)
self.draw_button = QPushButton("&Draw")
self.connect(self.draw_button, SIGNAL('clicked()'), self.on_draw)
self.grid_cb = QCheckBox("Show &Grid")
self.grid_cb.setChecked(False)
self.connect(self.grid_cb, SIGNAL('stateChanged(int)'), self.on_draw)
slider_label = QLabel('Bar width (%):')
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(1, 100)
self.slider.setValue(20)
self.slider.setTracking(True)
self.slider.setTickPosition(QSlider.TicksBothSides)
self.connect(self.slider, SIGNAL('valueChanged(int)'), self.on_draw)
#
# Layout with box sizers
#
hbox = QHBoxLayout()
for w in [ self.textbox, self.draw_button, self.grid_cb,
slider_label, self.slider]:
hbox.addWidget(w)
hbox.setAlignment(w, Qt.AlignVCenter)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(hbox)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QLabel("This is a demo")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_file_action = self.create_action("&Save plot",
shortcut="Ctrl+S", slot=self.save_plot,
tip="Save the plot")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_file_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def main():
app = QApplication(sys.argv)
form = AppForm()
form.show()
app.exec_()
if __name__ == "__main__":
main()
|
unlicense
|
alexp25/d4w_app_lab
|
backend/modules/aux_fn.py
|
1
|
2133
|
import math
import numpy as np
import matplotlib.pylab as plt
def euclid_dist(t1, t2):
return math.sqrt(sum((t1 - t2) ** 2))
def get_comp(a1, a2):
"""
    Compares two cluster sets. Each cluster set is an array of centroids
    (arrays of coordinates). The clusters can be in scrambled order, so the
    comparison matches each centroid in a1 with the closest centroid in a2
    and then returns the differences.
    :param a1: first set of centroids
    :param a2: second set of centroids
    :return: per-centroid distances, aggregated distance, per-centroid differences
"""
comp_euclid_dist = [0] * len(a1)
diff_array = [0] * len(a1)
for i_comp, ri in enumerate(a1):
        # initialize the distance with the distance to the first centroid of the second set
comp_euclid_dist[i_comp] = euclid_dist(ri, a2[0])
diff_array[i_comp] = ri - a2[0]
for j_comp, rj in enumerate(a2):
dist = euclid_dist(ri, rj)
# check if there is another centroid that is closer and use that to calculate the difference
if dist < comp_euclid_dist[i_comp]:
comp_euclid_dist[i_comp] = dist
diff_array[i_comp] = ri - rj
comp_euclid_dist_average = 0
for c in comp_euclid_dist:
comp_euclid_dist_average = comp_euclid_dist_average + c*c
comp_euclid_dist_average = np.sqrt(comp_euclid_dist_average)
# comp_avg = np.std(comp_euclid_dist)
return comp_euclid_dist, comp_euclid_dist_average, diff_array
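# A minimal usage sketch (added for illustration; the centroids below are
# made up): the second set holds the same two centroids as the first, just
# in reverse order, so get_comp should report small distances and diffs.
def _demo_get_comp():
    a1 = np.array([[0., 0.], [5., 5.]])
    a2 = np.array([[5.1, 4.9], [0.1, -0.1]])
    dists, dist_agg, diffs = get_comp(a1, a2)
    print(dists, dist_agg, diffs)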
def save_mat(mat, filename="mat.txt"):
s = ""
# print('\n'.join(str(aa) for aa in mat))
for row in mat:
if hasattr(row, "__len__"):
# print(row)
for col in row:
# print(col)
s += str(col) + "\t"
s += "\n"
print(s)
with open(filename, "wt") as f:
f.write(s)
def plot_from_matrix(m,colors=None):
for (i, ts) in enumerate(m):
if colors is not None and i < len(colors):
plt.plot(ts, colors[i])
else:
plt.plot(ts)
def get_array_of_arrays(a):
array = []
for ag in a:
for ag1 in ag:
array.append(ag1)
return array
|
mit
|
giorgiop/scikit-learn
|
examples/svm/plot_iris.py
|
24
|
3252
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
bsd-3-clause
|
boland1992/SeisSuite
|
build/lib/ambient/azimuth/heatinterpolate.py
|
8
|
3647
|
#!/usr/bin/env python
# combining density estimation and delaunay interpolation for confidence-weighted value mapping
# Dan Stowell, April 2013
import numpy as np
from numpy import random
from math import exp, log
from scipy import stats, mgrid, c_, reshape, rot90
import matplotlib.delaunay
import matplotlib.tri as tri
import matplotlib.delaunay.interpolate
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from colorsys import hls_to_rgb
#############################
# user settings
n = 100
gridsize = 100
fontsize = 'xx-small'
#############################
# first generate some random [x,y,z] data -- random locations but closest to the middle, and random z-values
data = random.randn(3, n) * 100.
# we will add some correlation to the z-values
data[2,:] += data[1,:]
data[2,:] += data[0,:]
# scale the z-values to 0--1 for convenience
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
data[2,:] = (data[2,:] - zmin) / (zmax - zmin)
xmin = np.min(data[0,:])
xmax = np.max(data[0,:])
ymin = np.min(data[1,:])
ymax = np.max(data[1,:])
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
##################################################
# plot it simply
plt.figure()
fig = plt.subplot(2,2,1)
for datum in data.T:
plt.plot(datum[0], datum[1], 'x', color=str(1.0 - datum[2]))
plt.title("scatter", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a KDE of it and plot that
fig = plt.subplot(2,2,2)
kdeX, kdeY = mgrid[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
positions = c_[kdeX.ravel(), kdeY.ravel()]
values = c_[data[0,:], data[1,:]]
kernel = stats.kde.gaussian_kde(values.T)
kdeZ = reshape(kernel(positions.T).T, kdeX.T.shape)
plt.imshow(rot90(kdeZ), cmap=cm.binary, aspect='auto')
plt.title("density of points", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a delaunay triangulation of it and plot that
fig = plt.subplot(2,2,3)
tt = matplotlib.delaunay.triangulate.Triangulation(data[0,:], data[1,:])
#triang = tri.Triangulation(data[0,:], data[1,:])
#plt.triplot(triang, 'bo-') # this plots the actual triangles of the triangulation. I'm more interested in their interpolated values
#extrap = tt.linear_extrapolator(data[2,:])
extrap = tt.nn_extrapolator(data[2,:])
interped = extrap[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
plt.imshow(rot90(interped), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated values", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now combine delaunay with KDE
fig = plt.subplot(2,2,4)
colours = np.zeros((gridsize, gridsize, 4))
kdeZmin = np.min(kdeZ)
kdeZmax = np.max(kdeZ)
confdepth = 0.45
for x in range(gridsize):
for y in range(gridsize):
conf = (kdeZ[x,y] - kdeZmin) / (kdeZmax - kdeZmin)
val = min(1., max(0., interped[x,y]))
colour = list(cm.gist_earth_r(val))
# now fade it out to white according to conf
for index in [0,1,2]:
colour[index] = (colour[index] * conf) + (1.0 * (1. -conf))
colours[x,y,:] = colour
#colours[x,y,:] = np.hstack((hls_to_rgb(val, 0.5 + confdepth - (confdepth * conf), 1.0), 1.0))
#colours[x,y,:] = [conf, conf, 1.0-conf, val]
plt.imshow(rot90(colours), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated & confidence-shaded", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
############################################
plt.savefig("output/plot_heati_simple.pdf", papertype='A4', format='pdf')
|
gpl-3.0
|
EttusResearch/gnuradio
|
gr-analog/examples/fmtest.py
|
40
|
7941
|
#!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in xrange(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
# Create a file sink for each of M output channels of the filter and connect it
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
jlopezpena/bearcart
|
bearcart/bearcart.py
|
1
|
9096
|
# -*- coding: utf-8 -*-
'''
Rickshaw
-------
Python Pandas + Rickshaw.js
'''
from __future__ import print_function
from __future__ import division
import time
import json
import os
from collections import defaultdict
from pkg_resources import resource_string
import pandas as pd
import numpy as np
from jinja2 import Environment, PackageLoader
class Chart(object):
'''Visualize Pandas Timeseries with Rickshaw.js'''
def __init__(self, data=None, width=750, height=400, plt_type='line',
colors=None, x_time=True, y_zero=False, **kwargs):
'''Generate a Rickshaw time series visualization with Pandas
Series and DataFrames.
The bearcart Chart generates the Rickshaw visualization of a Pandas
        timeseries Series or DataFrame. The key parameters are data, width,
        height, and plt_type. Colors is an optional parameter;
bearcart will default to the Rickshaw spectrum14 color palette if
none are passed. Keyword arguments can be passed to disable the
following components:
- x_axis
- y_axis
- hover
- legend
Parameters
----------
data: Pandas Series or DataFrame, default None
The Series or Dataframe must have a Datetime index.
        width: int, default 750
            Width of the chart in pixels
        height: int, default 400
            Height of the chart in pixels
plt_type: string, default 'line'
Must be one of 'line', 'area', 'scatterplot' or 'bar'
colors: dict, default None
Dict with keys matching DataFrame or Series column names, and hex
strings for colors
x_time: boolean, default True
If passed as False, the x-axis will have non-time values
y_zero: boolean, default False
The y-axis defaults to auto scaling. Pass True to set the min
y-axis value to 0.
kwargs:
Keyword arguments that, if passed as False, will disable the
following components: x_axis, y_axis, hover, legend
Returns
-------
Bearcart object
Examples
--------
        >>>vis = bearcart.Chart(data=df, width=800, height=300, plt_type='area')
        >>>vis = bearcart.Chart(data=series, plt_type='scatterplot',
colors={'Data 1': '#25aeb0',
'Data 2': '#114e4f'})
#Disable x_axis and legend
>>>vis = bearcart.Chart(data=df, x_axis=False, legend=False)
'''
self.defaults = {'x_axis': True, 'y_axis': True, 'hover': True,
'legend': True}
self.env = Environment(loader=PackageLoader('bearcart', 'templates'))
#Colors need to be js strings
if colors:
self.colors = {key: "'{0}'".format(value)
for key, value in colors.iteritems()}
else:
self.colors = None
self.x_axis_time = x_time
self.renderer = plt_type
self.width = width
self.height = height
self.y_zero = y_zero
self.template_vars = {}
#Update defaults for passed kwargs
for key, value in kwargs.iteritems():
self.defaults[key] = value
#Get templates for graph elements
for att, val in self.defaults.iteritems():
render_vars = {}
if val:
if not self.x_axis_time:
if att == 'x_axis':
att = 'x_axis_num'
elif att == 'hover':
render_vars = {'x_hover': 'xFormatter: function(x)'
'{return Math.floor(x / 10) * 10}'}
temp = self.env.get_template(att + '.js')
self.template_vars.update({att: temp.render(render_vars)})
#Transform data into Rickshaw-happy JSON format
if data is not None:
self.transform_data(data)
def transform_data(self, data):
'''Transform Pandas Timeseries into JSON format
Parameters
----------
data: DataFrame or Series
Pandas DataFrame or Series must have datetime index
Returns
-------
JSON to object.json_data
Example
-------
>>>vis.transform_data(df)
>>>vis.json_data
'''
def type_check(value):
'''Type check values for JSON serialization. Native Python JSON
serialization will not recognize some Numpy data types properly,
        so they must be explicitly converted.'''
if pd.isnull(value):
return None
elif (isinstance(value, pd.tslib.Timestamp) or
isinstance(value, pd.Period)):
return time.mktime(value.timetuple())
elif isinstance(value, (int, np.integer)):
return int(value)
elif isinstance(value, (float, np.float_)):
return float(value)
elif isinstance(value, str):
return str(value)
else:
return value
objectify = lambda dat: [{"x": type_check(x), "y": type_check(y)}
for x, y in dat.iteritems()]
self.raw_data = data
if isinstance(data, pd.Series):
data.name = data.name or 'data'
self.json_data = [{'name': data.name, 'data': objectify(data)}]
elif isinstance(data, pd.DataFrame):
self.json_data = [{'name': x[0], 'data': objectify(x[1])}
for x in data.iteritems()]
def _build_graph(self):
'''Build Rickshaw graph syntax with all data'''
#Set palette colors if necessary
if not self.colors:
self.palette = self.env.get_template('palette.js')
self.template_vars.update({'palette': self.palette.render()})
self.colors = {x['name']: 'palette.color()' for x in self.json_data}
template_vars = []
for index, dataset in enumerate(self.json_data):
group = 'datagroup' + str(index)
template_vars.append({'name': str(dataset['name']),
'color': self.colors[dataset['name']],
'data': 'json[{0}].data'.format(index)})
variables = {'dataset': template_vars, 'width': self.width,
'height': self.height, 'render': self.renderer}
if not self.y_zero:
variables.update({'min': "min: 'auto',"})
graph = self.env.get_template('graph.js')
self.template_vars.update({'graph': graph.render(variables)})
def create_chart(self, html_path='index.html', data_path='data.json',
js_path='rickshaw.min.js', css_path='rickshaw.min.css',
html_prefix=''):
'''Save bearcart output to HTML and JSON.
Parameters
----------
html_path: string, default 'index.html'
Path for html output
data_path: string, default 'data.json'
Path for data JSON output
js_path: string, default 'rickshaw.min.js'
If passed, the Rickshaw javascript library will be saved to the
path. The file must be named "rickshaw.min.js"
css_path: string, default 'rickshaw.min.css'
If passed, the Rickshaw css library will be saved to the
path. The file must be named "rickshaw.min.css"
html_prefix: Prefix path to be appended to all the other paths for file
creation, but not in the generated html file. This is needed if the
html file does not live in the same folder as the running python
script.
Returns
-------
HTML, JSON, JS, and CSS
Example
--------
        >>>vis.create_chart(html_path='myvis.html', data_path='visdata.json',
                            js_path='rickshaw.min.js',
                            css_path='rickshaw.min.css')
'''
self.template_vars.update({'data_path': str(data_path),
'js_path': js_path,
'css_path': css_path})
self._build_graph()
html = self.env.get_template('bcart_template.html')
self.HTML = html.render(self.template_vars)
with open(os.path.join(html_prefix, html_path), 'w') as f:
f.write(self.HTML)
with open(os.path.join(html_prefix, data_path), 'w') as f:
json.dump(self.json_data, f, sort_keys=True, indent=4,
separators=(',', ': '))
if js_path:
js = resource_string('bearcart', 'rickshaw.min.js')
with open(os.path.join(html_prefix, js_path), 'w') as f:
f.write(js)
if css_path:
css = resource_string('bearcart', 'rickshaw.min.css')
with open(os.path.join(html_prefix, css_path), 'w') as f:
f.write(css)
|
mit
|
martinwicke/tensorflow
|
tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
|
30
|
2249
|
# encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
untom/scikit-learn
|
sklearn/cross_decomposition/cca_.py
|
209
|
3150
|
from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Keep the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
    corr(Xk u, Yk v), subject to ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
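# A minimal sanity-check sketch (added for illustration; not part of the
# original module): the first pair of canonical scores should be strongly
# correlated for the toy data reused from the docstring above.
def _demo_cca_score_correlation():
    import numpy as np
    X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]
    Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    X_c, Y_c = CCA(n_components=1).fit(X, Y).transform(X, Y)
    print(np.corrcoef(X_c[:, 0], Y_c[:, 0])[0, 1])  # expected close to 1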
|
bsd-3-clause
|
deepesch/scikit-learn
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
258
|
2861
|
from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
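    # (added clarification) because the byte and Unicode spellings of "foo"
    # hash to the same feature, row 0 ends up with 3 non-zero entries whose
    # values sum to 4, which is what the assertions below check.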
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
|
bsd-3-clause
|
IssamLaradji/scikit-learn
|
sklearn/decomposition/base.py
|
12
|
5524
|
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self: object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
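# A minimal sanity-check sketch (added for illustration; not part of the
# original module): for a fitted PCA model, get_precision() should agree
# with the inverse of get_covariance().
def _demo_precision_is_inverse_of_covariance():
    from sklearn.decomposition import PCA  # deferred import, avoids a cycle
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    pca = PCA(n_components=2).fit(X)
    assert np.allclose(pca.get_precision(),
                       linalg.inv(pca.get_covariance()))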
|
bsd-3-clause
|