repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/metrics/tests/test_regression.py | 1 | 6109 | from __future__ import division, print_function
from itertools import product
import numpy as np
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]] * 4
y_pred = [[1, 1]] * 4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
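# --- Added illustrative check (not part of the original test module) ---
# A minimal sketch showing that custom multioutput weights simply form a
# weighted average of the 'raw_values' scores: 0.4 * 0.125 + 0.6 * 0.5625
# = 0.3875 ~= 0.39 and 0.4 * 0.25 + 0.6 * 0.625 = 0.475, matching the
# assertions above. The test name below is hypothetical.
def test_regression_custom_weights_match_weighted_average():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    raw_mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    raw_mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    # with weights summing to 1, the weighted average equals the dot product
    assert_almost_equal(
        mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6]),
        np.dot([0.4, 0.6], raw_mse))
    assert_almost_equal(
        mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6]),
        np.dot([0.4, 0.6], raw_mae))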
| mit |
kobejean/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
tim-taylor/evobee | utils/plot-results-envsize.py | 1 | 2445 | #!/usr/bin/env python3
#
# Script to plot evobee results from multiple runs with error bars
#
# Usage: plot-results-envsize.py title statsfile [statsfile2 [statsfile3 ...]]
# where each statsfile is a CSV format with layout: SZ,N,min,Q1,median,Q3,max,mean,sd
# C=constancy, P=patchiness, and the rest is the output of the command:
# sta --min --max --mean --sd --sample --q --delimiter $',' < pollination-50pc-times-s$S-c0p$C.txt
#
# Outputs: a graphics file called graph-<title>.pdf
#
# Author: Tim Taylor (http://www.tim-taylor.com)
# Last update: 9 October 2017
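#
# Example invocation (hypothetical file names, assuming the stats files were
# produced by the `sta` command described above):
#   ./plot-results-envsize.py "Fixation time vs env size" stats-c0p2.txt stats-c0p5.txt
# which writes a graphics file named graph-Fixation-time-vs-env-size.pdf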
import sys
import os
import csv
import re
import matplotlib.pyplot as plt
def main():
# check we have all of the required command line info
if len(sys.argv) < 3:
print("Usage: {} title statsfile [statsfile2 [statsfile3 ...]]"
.format(os.path.basename(sys.argv[0])), file=sys.stderr)
sys.exit(1)
# parse the command line info
title = sys.argv[1]
statsfilenames = [sys.argv[i] for i in range(2, len(sys.argv))]
for sfile in statsfilenames:
if not os.path.exists(sfile) or not os.path.isfile(sfile):
print("Stats file '{}' does not exist or is not a regular file!".format(sfile), file=sys.stderr)
exit(1)
with open(sfile, 'r') as f:
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
data = list(reader)
x = [row[0] for row in data] # EnvSize
y = [row[4] for row in data] # Median
#y = [row[7] for row in data] # Mean
e = [row[8] for row in data] # StdDev
xmin = x[0]
xmax = x[-1]
m = re.match(r".*-(.*).txt", sfile)
desc = m.group(1)
plt.errorbar(x, y, yerr=e, fmt='-o', label=desc)
plt.xlabel('Environment size')
plt.ylabel('Fixation time')
plt.legend(loc='upper left', prop={'size': 10})
plt.title(title)
plt.grid()
plt.xlim(xmin-2, xmax+2)
#plt.ylim(140,350)
#plt.ylim(140,550)
#plt.show()
# Replace spaces etc in title so we can use it in the filename of the graph
filename = 'graph-'+title+'.pdf'
for ch in [' ',',','(',')','[',']']:
if ch in filename:
filename = filename.replace(ch,"-")
filename = filename.replace('---','-')
filename = filename.replace('--','-')
plt.savefig(filename)
##-------------------------------------------------------##
if __name__ == '__main__':
main()
| gpl-3.0 |
jstoxrocky/statsmodels | statsmodels/tsa/filters/filtertools.py | 8 | 12430 | # -*- coding: utf-8 -*-
"""Linear Filters for time series analysis and testing
TODO:
* check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma)
Created on Sat Oct 23 17:18:03 2010
Author: Josef-pktd
"""
#not original copied from various experimental scripts
#version control history is there
from statsmodels.compat.python import range
import numpy as np
import scipy.fftpack as fft
from scipy import signal
from scipy.signal.signaltools import _centered as trim_centered
from ._utils import _maybe_get_pandas_wrapper
def _pad_nans(x, head=None, tail=None):
if np.ndim(x) == 1:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[np.nan] * head, x, [np.nan] * tail]
elif tail is None:
return np.r_[[np.nan] * head, x]
elif head is None:
return np.r_[x, [np.nan] * tail]
elif np.ndim(x) == 2:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[[np.nan] * x.shape[1]] * head, x,
[[np.nan] * x.shape[1]] * tail]
elif tail is None:
return np.r_[[[np.nan] * x.shape[1]] * head, x]
elif head is None:
return np.r_[x, [[np.nan] * x.shape[1]] * tail]
else:
raise ValueError("Nan-padding for ndim > 2 not implemented")
#original changes and examples in sandbox.tsa.try_var_convolve
# don't do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
# ifftn, fftfreq
#from numpy import product,array
def fftconvolveinv(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
copied from scipy.signal.signaltools, but here used to try out inverse filter
doesn't work or I can't get it to work
2010-10-23:
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
IN1 = fft.fftn(in1,fsize)
#IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made
IN1 /= fft.fftn(in2,fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#code duplication with fftconvolveinv
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
for use with arma (old version: in1=num in2=den in3=data
* better for consistency with other functions in1=data in2=num in3=den
* note in2 and in3 need to have consistent dimension/shape
since I'm using max of in2, in3 shapes and not the sum
copied from scipy.signal.signaltools, but here used to try out inverse
filter doesn't work or I can't get it to work
2010-10-23
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
if (in2 is None) and (in3 is None):
raise ValueError('at least one of in2 and in3 needs to be given')
s1 = np.array(in1.shape)
if not in2 is None:
s2 = np.array(in2.shape)
else:
s2 = 0
if not in3 is None:
s3 = np.array(in3.shape)
s2 = max(s2, s3) # try this looks reasonable for ARMA
#s2 = s3
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
#convolve shorter ones first, not sure if it matters
if not in2 is None:
IN1 = fft.fftn(in2, fsize)
if not in3 is None:
IN1 /= fft.fftn(in3, fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
IN1 *= fft.fftn(in1, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#original changes and examples in sandbox.tsa.try_var_convolve
#examples and tests are there
def recursive_filter(x, ar_coeff, init=None):
'''
Autoregressive, or recursive, filtering.
Parameters
----------
x : array-like
Time-series data. Should be 1d or n x 1.
ar_coeff : array-like
AR coefficients in reverse time order. See Notes
init : array-like
Initial values of the time-series prior to the first value of y.
The default is zero.
Returns
-------
y : array
Filtered array, number of columns determined by x and ar_coeff. If a
pandas object is given, a pandas object is returned.
Notes
-----
Computes the recursive filter ::
y[n] = ar_coeff[0] * y[n-1] + ...
+ ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]
    where n_coeff = len(ar_coeff).
'''
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x).squeeze()
ar_coeff = np.asarray(ar_coeff).squeeze()
if x.ndim > 1 or ar_coeff.ndim > 1:
raise ValueError('x and ar_coeff have to be 1d')
if init is not None: # integer init are treated differently in lfiltic
if len(init) != len(ar_coeff):
raise ValueError("ar_coeff must be the same length as init")
init = np.asarray(init, dtype=float)
if init is not None:
zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
else:
zi = None
y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)
if init is not None:
result = y[0]
else:
result = y
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
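# --- Added usage sketch (not part of the original module) ---
# A minimal example of the recursion documented above, assuming a single
# AR(1) coefficient of 0.5 and a constant input; the helper name is
# hypothetical.
def _example_recursive_filter():
    x = np.ones(5)
    y = recursive_filter(x, [0.5])
    # y[n] = 0.5 * y[n-1] + x[n]  ->  [1.0, 1.5, 1.75, 1.875, 1.9375]
    return y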
def convolution_filter(x, filt, nsides=2):
'''
Linear filtering via convolution. Centered and backward displaced moving
weighted average.
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
filt : array_like
Linear filter coefficients in reverse time-order. Should have the
same number of dimensions as x though if 1d and ``x`` is 2d will be
coerced to 2d.
nsides : int, optional
If 2, a centered moving average is computed using the filter
coefficients. If 1, the filter coefficients are for past values only.
Both methods use scipy.signal.convolve.
Returns
-------
y : ndarray, 2d
Filtered array, number of columns determined by x and filt. If a
pandas object is given, a pandas object is returned. The index of
the return is the exact same as the time period in ``x``
Notes
-----
In nsides == 1, x is filtered ::
y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]
where n_filt is len(filt).
If nsides == 2, x is filtered around lag 0 ::
y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
+ ... + x[n + n_filt/2]
where n_filt is len(filt). If n_filt is even, then more of the filter
is forward in time than backward.
If filt is 1d or (nlags,1) one lag polynomial is applied to all
variables (columns of x). If filt is 2d, (nlags, nvars) each series is
independently filtered with its own lag polynomial, uses loop over nvar.
This is different than the usual 2d vs 2d convolution.
Filtering is done with scipy.signal.convolve, so it will be reasonably
fast for medium sized data. For large data fft convolution would be
faster.
'''
# for nsides shift the index instead of using 0 for 0 lag this
# allows correct handling of NaNs
if nsides == 1:
trim_head = len(filt) - 1
trim_tail = None
elif nsides == 2:
trim_head = np.ceil(len(filt)/2.) - 1 or None
trim_tail = (np.ceil(len(filt)/2.) - len(filt) % 2) or None
else: # pragma : no cover
raise ValueError("nsides must be 1 or 2")
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x)
filt = np.asarray(filt)
if x.ndim > 1 and filt.ndim == 1:
filt = filt[:, None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
if filt.ndim == 1 or min(filt.shape) == 1:
result = signal.convolve(x, filt, mode='valid')
elif filt.ndim == 2:
nlags = filt.shape[0]
nvar = x.shape[1]
result = np.zeros((x.shape[0] - nlags + 1, nvar))
if nsides == 2:
for i in range(nvar):
# could also use np.convolve, but easier for swiching to fft
result[:, i] = signal.convolve(x[:, i], filt[:, i],
mode='valid')
elif nsides == 1:
for i in range(nvar):
result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
mode='valid')
result = _pad_nans(result, trim_head, trim_tail)
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
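# --- Added usage sketch (not part of the original module) ---
# A minimal example assuming a plain 3-term averaging filter; the helper
# name is hypothetical.
def _example_convolution_filter():
    x = np.arange(5.)
    filt = np.ones(3) / 3.
    y = convolution_filter(x, filt, nsides=1)
    # the first len(filt) - 1 positions cannot be filled and are NaN-padded:
    # y -> [nan, nan, 1.0, 2.0, 3.0]
    return y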
#copied from sandbox.tsa.garch
def miso_lfilter(ar, ma, x, useic=False): #[0.1,0.1]):
'''
use nd convolution to merge inputs,
then use lfilter to produce output
arguments for column variables
return currently 1d
Parameters
----------
ar : array_like, 1d, float
autoregressive lag polynomial including lag zero, ar(L)y_t
ma : array_like, same ndim as x, currently 2d
moving average lag polynomial ma(L)x_t
x : array_like, 2d
input data series, time in rows, variables in columns
Returns
-------
y : array, 1d
filtered output series
inp : array, 1d
combined input series
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter find array y such that::
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
'''
ma = np.asarray(ma)
ar = np.asarray(ar)
#inp = signal.convolve(x, ma, mode='valid')
#inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
#Note: convolve mixes up the variable left-right flip
#I only want the flip in time direction
#this might also be a mistake or problem in other code where I
#switched from correlate to convolve
# correct convolve version, for use with fftconvolve in other cases
#inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
#for testing 2d equivalence between convolve and correlate
#np.testing.assert_almost_equal(inp2, inp)
nobs = x.shape[0]
# cut of extra values at end
#todo initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
#zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
#return signal.lfilter([1], ar, inp), inp
| bsd-3-clause |
wanghaven/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
return '#%02x%02x%02x' % tuple([round(val*255) for val in rgb])
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
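# --- Added usage sketch (not part of the original module) ---
# Quick illustration of the helpers above; the function name is hypothetical
# and the expected values follow the hex2color docstring.
def _example_color_helpers():
    assert hex2color('#efefef') == (0xef / 255.0,) * 3   # ~0.937 per channel
    assert is_color_like('chartreuse')
    assert not is_color_like('no-such-color')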
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4'%len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
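# --- Added usage sketch (not part of the original module) ---
# The module-level converter accepts the specification formats listed in the
# module docstring; the function name is hypothetical.
def _example_color_converter():
    assert colorConverter.to_rgb('r') == (1.0, 0.0, 0.0)        # single letter
    assert colorConverter.to_rgb('0.5') == (0.5, 0.5, 0.5)      # gray string
    assert colorConverter.to_rgb('#00FFFF') == (0.0, 1.0, 1.0)  # hex string
    return colorConverter.to_rgba('g', alpha=0.5)               # (0.0, 0.5, 0.0, 0.5)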
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
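# --- Added usage sketch (not part of the original module) ---
# With a single (x, y0, y1) segment running from 0 to 1 the lookup table is a
# plain linear ramp; the function name is hypothetical.
def _example_make_mapping_array():
    data = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]
    return makeMappingArray(5, data)   # -> [0.0, 0.25, 0.5, 0.75, 1.0]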
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
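# --- Added usage sketch (not part of the original module) ---
# Building a map from the cdict shown in the class docstring; the function
# name is hypothetical.
def _example_linear_segmented_colormap():
    cdict = {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0),
                       (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    cmap = LinearSegmentedColormap('example', cdict)
    return cmap(0.5)   # an (r, g, b, a) tuple for the midpoint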
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
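# --- Added usage sketch (not part of the original module) ---
# A three-entry listed map; integer arguments index the color list directly,
# so 0 selects the first color. The function name is hypothetical.
def _example_listed_colormap():
    cmap = ListedColormap(['r', 'g', 'b'], name='rgb_demo')
    return cmap(0)   # roughly (1.0, 0.0, 0.0, 1.0)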
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
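# --- Added usage sketch (not part of the original module) ---
# Mapping data onto the 0-1 range with explicit limits; the function name is
# hypothetical.
def _example_normalize():
    norm = Normalize(vmin=0.0, vmax=10.0)
    return norm(5.0), norm.inverse(0.25)   # 0.5 and 2.5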
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
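# --- Added usage sketch (not part of the original module) ---
# With boundaries [0, 1, 2, 3] and ncolors=3, a value v with b[i] <= v < b[i+1]
# maps to color index i; the function name is hypothetical.
def _example_boundary_norm():
    norm = BoundaryNorm([0, 1, 2, 3], ncolors=3)
    return norm(1.5)   # -> 1, i.e. the second color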
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
appapantula/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
zhenv5/scikit-learn | sklearn/__init__.py | 59 | 3038 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
bjodah/chemreac | examples/robertson.py | 2 | 3609 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The fruit fly of stiff numerical chemical kinetics problems.
$ python robertson.py -A 1.0 -B 1e-20 -C 1e-20 --t0 0 --plot --tend 3e10 --nt \
1024 --logt --logy --verbose
"""
from __future__ import (absolute_import, division, print_function)
from math import log10
import numpy as np
from chemreac import ReactionDiffusion
from chemreac.chemistry import Reaction, ReactionSystem
from chemreac.integrate import run
from chemreac.util.analysis import suggest_t0
from chemreac.util.plotting import (
plot_C_vs_t, save_and_or_show_plot, plot_faded_time
)
def get_reactions(rates):
"""
A -> B
B + C -> A + C
B + B -> C
"""
return (
Reaction({'A': 1}, {'B': 1}, rates[0]),
Reaction({'B': 1, 'C': 1}, {'A': 1, 'C': 1}, rates[1]),
Reaction({'B': 2}, {'B': 1, 'C': 1}, rates[2])
)
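# For reference, these three reactions correspond to the classic stiff
# Robertson rate equations (a sketch, with k1, k2, k3 as passed in `rates`):
#   dA/dt = -k1*A + k2*B*C
#   dB/dt =  k1*A - k2*B*C - k3*B**2
#   dC/dt =  k3*B**2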
def integrate_rd(tend=1e2, A0=1.0, B0=0.0, C0=0.0, k1=0.04, k2=1e4, k3=3e7,
t0=1e2, nt=100, N=1, nstencil=3, logt=False, logy=False,
plot=False, savefig='None', verbose=False, dump_expr='False',
use_chempy=False, D=2e-3):
if N == 1:
init_conc = (A0, B0, C0)
else:
init_conc = np.tile((A0, B0, C0), (N, 1))
init_conc /= np.linspace(1, 2, N).reshape((N, 1))**.5
rsys = ReactionSystem(get_reactions((k1, k2, k3)), 'ABC')
if verbose:
print([str(_) for _ in rsys.rxns])
if use_chempy:
from chempy.kinetics.ode import get_odesys
odesys = get_odesys(rsys, include_params=True)
if N != 1:
raise ValueError("ChemPy does not support diffusion")
odesys.integrate(np.logspace(log10(t0), log10(tend)), init_conc)
if plot:
odesys.plot_result(xscale='log', yscale='log')
result = None
else:
rd = ReactionDiffusion.from_ReactionSystem(
rsys, N=N, nstencil=1 if N == 1 else nstencil, logt=logt,
logy=logy, D=[D/2, D/3, D/5])
if dump_expr.lower() not in ('false', '0'):
from chemreac.symbolic import SymRD
import sympy as sp
cb = {'latex': sp.latex,
'ccode': sp.ccode}.get(dump_expr.lower(), str)
srd = SymRD.from_rd(rd, k=sp.symbols('k:3'))
print('dydx:')
print('\n'.join(map(cb, srd._f)))
print('jac:')
for ri, row in enumerate(srd.jacobian.tolist()):
for ci, expr in enumerate(row):
if expr == 0:
continue
print(ri, ci, cb(expr))
return None
if t0 == 0 and logt:
t0 = 1e-3*suggest_t0(rd, init_conc)
if verbose:
print("Using t0 = %12.5g" % t0)
t = np.logspace(np.log10(t0), np.log10(tend), nt)
print(t[0], t[-1])
integr = run(rd, init_conc, t)
if verbose:
import pprint
pprint.pprint(integr.info)
if plot:
if N == 1:
plot_C_vs_t(integr, xscale='log', yscale='log')
else:
import matplotlib.pyplot as plt
for idx, name in enumerate('ABC', 1):
plt.subplot(1, 3, idx)
rgb = [.5, .5, .5]
rgb[idx-1] = 1
plot_faded_time(integr, name, rgb=rgb, log_color=True)
result = integr
if plot:
save_and_or_show_plot(savefig=savefig)
return result
if __name__ == '__main__':
import argh
argh.dispatch_command(integrate_rd)
| bsd-2-clause |
UDST/urbanaccess | urbanaccess/utils.py | 1 | 8641 | # The following logging functions were modified from the osmnx library and
# used with permission from the author Geoff Boeing:
# log, _get_logger: https://github.com/gboeing/osmnx/blob/master/osmnx/utils.py
import logging as lg
import unicodedata
import sys
import datetime as dt
import os
import pandas as pd
from urbanaccess import config
def log(message, level=None, name=None, filename=None):
"""
Write a message to the log file and/or print to the console.
Parameters
----------
message : string
the content of the message to log
level : int, optional
one of the logger.level constants
name : string, optional
name of the logger
filename : string, optional
name of the log file
Returns
-------
None
"""
if level is None:
level = lg.INFO
if name is None:
name = config.settings.log_name
if filename is None:
filename = config.settings.log_filename
if config.settings.log_file:
# get the current logger or create a new one then log message at
# requested level
logger = _get_logger(level=level, name=name, filename=filename)
if level == lg.DEBUG:
logger.debug(message)
elif level == lg.INFO:
logger.info(message)
elif level == lg.WARNING:
logger.warning(message)
elif level == lg.ERROR:
logger.error(message)
# if logging to console is turned on, convert message to ascii and print
# to the console only
if config.settings.log_console:
# capture current stdout, then switch it to the console, print the
# message, then switch back to what had been the stdout
# this prevents logging to notebook - instead, it goes to console
standard_out = sys.stdout
sys.stdout = sys.__stdout__
# convert message to ascii for proper console display in windows
# terminals
message = unicodedata.normalize('NFKD', str(message)).encode(
'ascii', errors='replace').decode()
print(message)
sys.stdout = standard_out
# otherwise print out standard statement
else:
print(message)
def _get_logger(level=None, name=None, filename=None):
"""
Create a logger or return the current one if already instantiated.
Parameters
----------
level : int, optional
one of the logger.level constants
name : string, optional
name of the logger
filename : string, optional
name of the log file
Returns
-------
logger : logger.logger
"""
if level is None:
level = config.settings.log_level
if name is None:
name = config.settings.log_name
if filename is None:
filename = config.settings.log_filename
logger = lg.getLogger(name)
# if a logger with this name is not already established
if not getattr(logger, 'handler_set', None):
todays_date = dt.datetime.today().strftime('%Y_%m_%d')
log_filename = '{}/{}_{}.log'.format(config.settings.logs_folder,
filename, todays_date)
if not os.path.exists(config.settings.logs_folder):
os.makedirs(config.settings.logs_folder)
# create file handler and log formatter and establish settings
handler = lg.FileHandler(log_filename, encoding='utf-8')
formatter = lg.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level)
logger.handler_set = True
return logger
def create_hdf5(dir=None, filename=None, overwrite_hdf5=False):
"""
Create an empty HDF5 file
Parameters
----------
dir : string, optional
directory to save HDF5 file, if None defaults to dir set in
config.settings.data_folder
filename : string, optional
name of the HDF5 file to save with .h5 extension, if None defaults
to urbanaccess.h5
overwrite_hdf5 : bool, optional
if true any existing HDF5 file with the specified name in the
specified directory will be overwritten
Returns
-------
None
"""
if dir is None:
dir = config.settings.data_folder
else:
if not isinstance(dir, str):
raise ValueError('Directory must be a string.')
try:
if not os.path.exists(dir):
os.makedirs(dir)
except Exception:
raise ValueError('Unable to make directory {}.'.format(dir))
if filename is None:
filename = 'urbanaccess.h5'
else:
if not isinstance(filename, str):
raise ValueError('Filename must be a string.')
hdf5_save_path = os.path.join(dir, filename)
if not filename.endswith('.h5'):
raise ValueError('HDF5 filename extension must be "h5".')
if not os.path.exists(hdf5_save_path):
store = pd.HDFStore(hdf5_save_path)
store.close()
log(' New {} HDF5 store created in dir: {}.'.format(filename, dir))
elif overwrite_hdf5 and os.path.exists(hdf5_save_path):
store = pd.HDFStore(hdf5_save_path)
store.close()
log(' Existing {} HDF5 store in dir: {} has been '
'overwritten.'.format(filename, dir))
else:
log(' Using existing HDF5 store: {}.'.format(hdf5_save_path))
return hdf5_save_path
def df_to_hdf5(data=None, key=None, overwrite_key=False, dir=None,
filename=None, overwrite_hdf5=False):
"""
Write a pandas.DataFrame to a table in a HDF5 file
Parameters
----------
data : pandas.DataFrame
pandas.DataFrame to save to a HDF5 table
key : string
name of table to save DataFrame as in the HDF5 file
overwrite_key : bool, optional
if true any existing table with the specified key name will be
overwritten
dir : string
directory to save HDF5 file
filename : string
name of the HDF5 file to save with .h5 extension
overwrite_hdf5 : bool, optional
if true any existing HDF5 file with the specified name in the
specified directory will be overwritten
Returns
-------
None
"""
hdf5_save_path = create_hdf5(
dir=dir, filename=filename, overwrite_hdf5=overwrite_hdf5)
store = pd.HDFStore(hdf5_save_path, mode='r')
if not ''.join(['/', key]) in store.keys():
store.close()
data.to_hdf(hdf5_save_path, key=key, mode='a', format='table')
log(' DataFrame: {} saved in HDF5 store: {}.'.format(
key, hdf5_save_path))
elif ''.join(['/', key]) in store.keys() and overwrite_key:
store.close()
data.to_hdf(hdf5_save_path, key=key, mode='a', format='table')
log(' Existing DataFrame: {} overwritten in HDF5 store: {}.'.format(
key, hdf5_save_path))
else:
store.close()
        log(' Key {} already exists in HDF5 store: {}. '
            'Set overwrite_key = True to replace the existing '
            'data in this key.'.format(key, hdf5_save_path))
def hdf5_to_df(dir=None, filename=None, key=None):
"""
Read data from a HDF5 file to a pandas.DataFrame
Parameters
----------
dir : string
directory of the HDF5 file to read from
filename : string
name of the HDF5 file with .h5 extension to read from
key : string
table inside the HDF5 file to return as a pandas.DataFrame
Returns
-------
df : pandas.DataFrame
"""
if dir is None:
dir = config.settings.data_folder
else:
if not isinstance(dir, str):
raise ValueError('Directory must be a string.')
if filename is None:
filename = 'urbanaccess_net.h5'
else:
if not isinstance(filename, str):
raise ValueError('Filename must be a string.')
hdf5_load_path = os.path.join(dir, filename)
if not filename.endswith('.h5'):
raise ValueError('HDF5 filename extension must be "h5".')
if not os.path.exists(hdf5_load_path):
raise ValueError('Unable to find directory or file: {}.'.format(
hdf5_load_path))
with pd.HDFStore(hdf5_load_path) as store:
log(' Reading HDF5 store: {}...'.format(hdf5_load_path))
try:
df = store[key]
log(' Successfully returned: {} as DataFrame.'.format(key))
except Exception:
raise ValueError('Unable to find key: {}. Keys found: {}.'.format(
key, store.keys()))
return df
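# ---------------------------------------------------------------------------
# Illustrative sketch of the intended round trip through the helpers above
# (create_hdf5 -> df_to_hdf5 -> hdf5_to_df).  It assumes pandas' HDF5 support
# (PyTables) is installed and that config.settings.data_folder is writable;
# the file and key names are made up for the example.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _demo_df = pd.DataFrame({'node_id': [1, 2, 3],
                             'weight': [0.5, 1.0, 1.5]})
    df_to_hdf5(data=_demo_df, key='demo_table', overwrite_key=True,
               filename='urbanaccess_demo.h5')
    _roundtrip_df = hdf5_to_df(filename='urbanaccess_demo.h5',
                               key='demo_table')
    print(_roundtrip_df.equals(_demo_df))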
| agpl-3.0 |
saltastro/salt-data-quality-site | app/main/pages/instrument/hrs/blue/arc/plots.py | 1 | 7256 | import pandas as pd
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
# creates your plot
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
milliseconds=['%S.%2Ns'],
seconds=[':%Ss'],
minsec=[':%Mm:%Ss'],
minutes=['%H:%M:%S'],
hourmin=['%H:%M:'],
hours=["%H:%M"],
days=["%d %b"],
months=["%d %b %Y"],
years=["%b %Y"])
def get_source(start_date, end_date, obsmode):
filename = 'H%%'
logic = " and OBSMODE='{obsmode}' " \
" and DeltaX > -99 " \
" and FileName like '{filename}' " \
" and Object = 1 group by UTStart, HrsOrder" \
.format(filename=filename, obsmode=obsmode)
sql = "Select UTStart, HrsOrder, AVG(DeltaX) as avg, CONVERT(UTStart,char) AS Time " \
" from DQ_HrsArc join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
colors = []
if len(df) > 0:
ord_min = df['HrsOrder'].min()
ord_max = df['HrsOrder'].max()
colors = [Plasma256[int((y - ord_min) * (len(Plasma256) - 1) / float(ord_max - ord_min))] for y in
df["HrsOrder"]]
df['colors'] = colors
source = ColumnDataSource(df)
return source
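# Illustrative sketch (not used by the module): the colour assigned to each
# point in get_source() above is simply its HrsOrder value mapped linearly
# onto the 256-entry Plasma palette.  The ord_min/ord_max defaults below are
# made-up example values, not values read from the database.
def _example_palette_index(hrs_order, ord_min=60, ord_max=90):
    idx = int((hrs_order - ord_min) * (len(Plasma256) - 1)
              / float(ord_max - ord_min))
    return Plasma256[idx]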
@data_quality(name='high_resolution', caption='')
def hrs_high_resolution_plot(start_date, end_date):
"""Return a <div> element with the High resolution AVG(DeltaX) vs time.
    The plot shows the AVG(DeltaX) over the requested date range.
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the High resolution AVG(DeltaX) vs time.
"""
obsmode = 'HIGH RESOLUTION'
source = get_source(start_date, end_date, obsmode)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">AVERAGE: </span>
<span style="font-size: 15px;"> @avg</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HrsOrder: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="High Resolution",
x_axis_label='Date',
y_axis_label='AVG(DeltaX)',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='avg', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='medium_resolution', caption='')
def hrs_medium_resolution_plot(start_date, end_date):
"""
Return a <div> element with the Medium resolution AVG(DeltaX) vs time.
    The plot shows the AVG(DeltaX) over the requested date range.
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Medium resolution AVG(DeltaX) vs time.
"""
obsmode = 'MEDIUM RESOLUTION'
source = get_source(start_date, end_date, obsmode)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">AVERAGE: </span>
<span style="font-size: 15px;"> @avg</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HrsOrder: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="Medium Resolution",
x_axis_label='Date',
y_axis_label='AVG(DeltaX)',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='avg', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='low_resolution', caption='')
def hrs_low_resolution_plot(start_date, end_date):
"""
Return a <div> element with the Low resolution AVG(DeltaX) vs time.
    The plot shows the AVG(DeltaX) over the requested date range.
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Low resolution AVG(DeltaX) vs time.
"""
obsmode = 'LOW RESOLUTION'
source = get_source(start_date, end_date, obsmode)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">AVERAGE: </span>
<span style="font-size: 15px;"> @avg</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HrsOrder: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="Low Resolution",
x_axis_label='Date',
y_axis_label='AVG(DeltaX)',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='avg', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
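# ---------------------------------------------------------------------------
# The three plot functions above are identical apart from their title and
# OBSMODE filter.  The helper below is a refactoring sketch only -- it is not
# wired into the @data_quality registry and is not used by this module -- and
# it uses bokeh's simpler (label, field) tooltip form instead of the custom
# HTML tooltips above.
# ---------------------------------------------------------------------------
def _resolution_plot(start_date, end_date, obsmode, title):
    source = get_source(start_date, end_date, obsmode)
    _hover = HoverTool(tooltips=[("Date", "@Time"),
                                 ("AVERAGE", "@avg"),
                                 ("HrsOrder", "@HrsOrder")])
    p = figure(title=title,
               x_axis_label='Date',
               y_axis_label='AVG(DeltaX)',
               x_axis_type='datetime',
               tools=["pan,reset,save,wheel_zoom,box_zoom", _hover])
    p.scatter(source=source, x='UTStart', y='avg', color='colors',
              fill_alpha=0.2, size=10)
    p.xaxis[0].formatter = date_formatter
    return p
# Each decorated function could then reduce to, e.g.:
#     return _resolution_plot(start_date, end_date, 'LOW RESOLUTION',
#                             'Low Resolution')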
| mit |
Windy-Ground/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/utils/estimator_checks.py | 41 | 47834 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
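# Illustrative usage sketch (not part of this module's API): in this version
# check_estimator takes the estimator *class* itself.  LinearSVC is used here
# purely as an example of a class implementing the scikit-learn API.
def _example_check_estimator():
    from sklearn.svm import LinearSVC
    check_estimator(LinearSVC)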
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
    "An object that is convertible to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: the error message should state explicitly "
                  "that sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
    # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
Obus/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
nextml/NEXT | next/api/resources/targets.py | 2 | 1422 | from StringIO import StringIO
import pandas as pd
from flask import Flask, send_file, request, abort
from flask_restful import Resource, reqparse
import traceback
import json
from io import BytesIO
import zipfile
import next.utils
import next.utils as utils
import next.api.api_util as api_util
from next.api.api_util import APIArgument
from next.api.resource_manager import ResourceManager
from next.database_client.DatabaseAPI import DatabaseAPI
from next.logging_client.LoggerAPI import LoggerAPI
from next.apps.App import App
db = DatabaseAPI()
ell = LoggerAPI()
resource_manager = ResourceManager()
# Request parser. Checks that necessary dictionary keys are available in a given resource.
# We rely on learningLib functions to ensure that all necessary arguments are available and parsed.
post_parser = reqparse.RequestParser(argument_class=APIArgument)
# Custom errors for GET and POST verbs on experiment resource
meta_error = {
'ExpDoesNotExistError': {
'message': "No experiment with the specified experiment ID exists.",
'code': 400,
'status':'FAIL'
},
}
meta_success = {
'code': 200,
'status': 'OK'
}
# Participants resource class
class Targets(Resource):
def get(self, exp_uid):
app_id = resource_manager.get_app_id(exp_uid)
app = App(app_id, exp_uid, db, ell)
butler = app.butler
return butler.targets.get_targetset(exp_uid)
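# Illustrative sketch only: how a resource such as Targets is typically
# registered on a flask_restful.Api instance.  The URL rule below is an
# assumption made for the example; the real route is defined elsewhere in the
# next.api package.
def _example_register_targets(api):
    api.add_resource(Targets, '/experiment/<string:exp_uid>/targets')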
| apache-2.0 |
NicholasBermuda/transit-fitting | setup.py | 2 | 1295 | from setuptools import setup, find_packages
import os,sys
def readme():
with open('README.md') as f:
return f.read()
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__TRANSITFIT_SETUP__ = True
import transitfit
version = transitfit.__version__
# Publish the library to PyPI.
if "publish" in sys.argv[-1]:
os.system("python setup.py sdist upload")
sys.exit()
# Push a new tag to GitHub.
if "tag" in sys.argv:
os.system("git tag -a {0} -m 'version {0}'".format(version))
os.system("git push --tags")
sys.exit()
setup(name = "transitfit",
version = version,
description = "Pythonic fitting of transits.",
long_description = readme(),
author = "Timothy D. Morton",
author_email = "[email protected]",
url = "https://github.com/timothydmorton/transit-fitting",
packages = find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Astronomy'
],
install_requires=['pandas>=0.14','emcee>=2',
'kplr', 'transit', 'triangle_plot'],
zip_safe=False
)
| mit |
kcompher/FreeDiscovUI | freediscovery/lsi.py | 1 | 5918 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from sklearn.externals import joblib
from sklearn.preprocessing import normalize
from sklearn.utils import check_array, as_float_array, check_random_state
from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.decomposition import TruncatedSVD
from .text import FeatureVectorizer
from .base import _BaseWrapper
from .categorization import NearestNeighborRanker
from .utils import setup_model
from .exceptions import (WrongParameter, NotImplementedFD)
def _touch(filename):
open(filename, 'ab').close()
class _LSIWrapper(_BaseWrapper):
"""Document categorization using Latent Semantic Indexing (LSI)
Parameters
----------
cache_dir : str
folder where the model will be saved
parent_id : str
dataset id
mid : str
LSI model id (the dataset id will be inferred)
verbose : bool, optional
print progress messages
"""
_wrapper_type = "lsi"
def __init__(self, cache_dir='/tmp/', parent_id=None, mid=None, verbose=False):
super(_LSIWrapper, self).__init__(cache_dir=cache_dir,
parent_id=parent_id, mid=mid)
def fit_transform(self, n_components=150, n_iter=5):
"""
Perform the SVD decomposition
Parameters
----------
n_components : int
number of selected singular values (number of LSI dimensions)
n_iter : int
number of iterations for the stochastic SVD algorithm
Returns
-------
mid : str
model id
lsi : _BaseWrapper
the TruncatedSVD object
exp_var : float
the explained variance of the SVD decomposition
"""
parent_id = self.pipeline.mid
dsid_dir = self.fe.dsid_dir
if not os.path.exists(dsid_dir):
raise IOError
pars = {'parent_id': parent_id, 'n_components': n_components}
mid_dir_base = os.path.join(dsid_dir, self._wrapper_type)
if not os.path.exists(mid_dir_base):
os.mkdir(mid_dir_base)
mid, mid_dir = setup_model(mid_dir_base)
ds = self.pipeline.data
svd = _TruncatedSVD_LSI(n_components=n_components,
n_iter=n_iter #, algorithm='arpack'
)
lsi = svd
lsi.fit(ds)
ds_p = lsi.transform_lsi_norm(ds)
joblib.dump(pars, os.path.join(mid_dir, 'pars'), compress=9)
joblib.dump(lsi, os.path.join(mid_dir, 'model'))
joblib.dump(ds_p, os.path.join(mid_dir, 'data'))
exp_var = lsi.explained_variance_ratio_.sum()
self.mid = mid
return lsi, exp_var
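# ---------------------------------------------------------------------------
# Illustrative sketch (not called by the package itself): fitting an LSI model
# on top of a previously vectorized dataset.  cache_dir and parent_id are
# assumptions -- parent_id must be the id of an existing FeatureVectorizer
# dataset stored under cache_dir.
# ---------------------------------------------------------------------------
def _example_lsi_fit(cache_dir, parent_id):
    lsi_wrapper = _LSIWrapper(cache_dir=cache_dir, parent_id=parent_id)
    lsi_model, explained_variance = lsi_wrapper.fit_transform(n_components=100)
    return lsi_wrapper.mid, explained_variance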
# The below class is identical to TruncatedSVD,
# https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/decomposition/truncated_svd.py#L25
# the only reason is that we need to save the Sigma matrix when performing this transform.
# This will no longer be necessary with sklearn v0.19
class _TruncatedSVD_LSI(TruncatedSVD):
"""
    A patch of `sklearn.decomposition.TruncatedSVD` to include whitening (see `scikit-learn/scikit-learn#7832`)
"""
    def transform_lsi(self, X):
        """LSI transform, normalized by the inverse of the singular values"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T).dot(np.diag(1./self.Sigma))
def transform_lsi_norm(self, X):
Y = self.transform_lsi(X)
normalize(Y, copy=False)
return Y
def fit_transform(self, X, y=None):
""" Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
self.Sigma = Sigma[:self.n_components]
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
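# Illustrative sketch (added for clarity; not part of the original module):
# fit the patched SVD on a random sparse term-document matrix and project it
# into the normalized LSI space.  The matrix shape is arbitrary.
def _demo_truncated_svd_lsi(n_components=5):  # pragma: no cover
    X = sp.rand(20, 50, density=0.1, format='csr', random_state=0)
    svd = _TruncatedSVD_LSI(n_components=n_components, n_iter=5)
    svd.fit(X)                        # stores components_ and the Sigma vector
    return svd.transform_lsi_norm(X)  # (20, n_components) with L2-normalized rows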
| bsd-3-clause |
BlueFern/parbrain | util/plot_csv.py | 1 | 2157 | import matplotlib.pyplot as plt
import numpy as np
t = []
R2072 = []
R2518 = []
R2258 = []
R2254 = []
Kp2072 = []
Kp2518 = []
Kp2258 = []
Kp2254 = []
#
R2072, Kp2072, t = np.loadtxt('test/cell20720.0.csv', delimiter=',', unpack=True)
R2518, Kp2518, t = np.loadtxt('test/cell25180.0.csv', delimiter=',', unpack=True)
R2258, Kp2258, t = np.loadtxt('test/cell22580.0.csv', delimiter=',', unpack=True)
R2254, Kp2254, t = np.loadtxt('test/cell22540.0.csv', delimiter=',', unpack=True)
#
##NKs = NKs*64434963 - 2000
#
plt.plot(t, R2072, label="Cell 2072")
plt.plot(t, R2518, label="Cell 2518")
plt.plot(t, R2258, label="Cell 2258")
plt.plot(t, R2254, label="Cell 2254")
#plt.plot(t, Kp2072, label="Cell 2072")
#plt.plot(t, Kp2518, label="Cell 2518")
#plt.plot(t, Kp2258, label="Cell 2258")
#plt.plot(t, Kp2254, label="Cell 2254")
plt.legend(loc=1)
plt.xlabel('Time (sec)')
plt.ylabel(r'Radius ($\mu$m)')
#plt.ylabel('Perivascular [K+] ($\mu$M)')
#
#
fig = plt.gcf()
fig.set_size_inches(4,6)
plt.savefig("CSD_Rplots.svg")
#Kp2072 = []
#Kp2518 = []
#Kp2258 = []
#Kp2254 = []
#
#t, Kp2072 = np.loadtxt('csvFiles/Kpcell2072.csv', delimiter=',', unpack=True)
#t, Kp2518 = np.loadtxt('csvFiles/Kpcell2518.csv', delimiter=',', unpack=True)
#t, Kp2258 = np.loadtxt('csvFiles/Kpcell2258.csv', delimiter=',', unpack=True)
#t, Kp2254 = np.loadtxt('csvFiles/Kpcell2254.csv', delimiter=',', unpack=True)
#
##NKs = NKs*64434963 - 2000
#
#plt.plot(t, Kp2072/1e3, label="Cell 2072")
#plt.plot(t, Kp2518/1e3, label="Cell 2518")
#plt.plot(t, Kp2258/1e3, label="Cell 2258")
#plt.plot(t, Kp2254/1e3, label="Cell 2254")
#
#plt.legend(loc=1)
#
#plt.xlabel('Time (sec)')
#plt.ylabel('Perivascular K+ (mM)')
##plt.ylabel('ECS [K+] ($\mu$M)')
#
#fig = plt.gcf()
#fig.set_size_inches(4,6)
#plt.savefig("CSD_Kpplots.svg")
#t = []
#Ke2072 = []
#
#t, Ke2072 = np.loadtxt('csvFiles/KeCell2072.csv', delimiter=',', unpack=True)
#
##NKs = NKs*64434963 - 2000
#
#plt.plot(t, Ke2072)
#
#
#plt.legend(loc=1)
#
#plt.xlabel('Time (sec)')
#plt.ylabel('Extracellular K+ (mM)')
##plt.ylabel('ECS [K+] ($\mu$M)')
#
#
#fig = plt.gcf()
##fig.set_size_inches(4,6)
#plt.savefig("CSD_Keplot2.svg")
| gpl-2.0 |
effigies/PySurfer | surfer/utils.py | 4 | 24696 | import logging
import warnings
import sys
import os
from os import path as op
import inspect
from functools import wraps
import subprocess
import numpy as np
import nibabel as nib
from scipy import sparse
from scipy.spatial.distance import cdist
import matplotlib as mpl
from matplotlib import cm
logger = logging.getLogger('surfer')
# Py3k compat
if sys.version[0] == '2':
string_types = basestring # noqa
else:
string_types = str
class Surface(object):
"""Container for surface object
Attributes
----------
subject_id : string
Name of subject
hemi : {'lh', 'rh'}
Which hemisphere to load
surf : string
Name of the surface to load (eg. inflated, orig ...)
data_path : string
Path where to look for data
x: 1d array
x coordinates of vertices
y: 1d array
y coordinates of vertices
z: 1d array
z coordinates of vertices
coords : 2d array of shape [n_vertices, 3]
The vertices coordinates
faces : 2d array
The faces ie. the triangles
nn : 2d array
Normalized surface normals for vertices.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment variable.
"""
def __init__(self, subject_id, hemi, surf, subjects_dir=None,
offset=None):
"""Surface
Parameters
----------
subject_id : string
Name of subject
hemi : {'lh', 'rh'}
Which hemisphere to load
surf : string
Name of the surface to load (eg. inflated, orig ...)
offset : float | None
If 0.0, the surface will be offset such that the medial
wall is aligned with the origin. If None, no offset will
be applied. If != 0.0, an additional offset will be used.
"""
if hemi not in ['lh', 'rh']:
            raise ValueError('hemi must be "lh" or "rh"')
self.subject_id = subject_id
self.hemi = hemi
self.surf = surf
self.offset = offset
subjects_dir = _get_subjects_dir(subjects_dir)
self.data_path = op.join(subjects_dir, subject_id)
def load_geometry(self):
surf_path = op.join(self.data_path, "surf",
"%s.%s" % (self.hemi, self.surf))
self.coords, self.faces = nib.freesurfer.read_geometry(surf_path)
if self.offset is not None:
if self.hemi == 'lh':
self.coords[:, 0] -= (np.max(self.coords[:, 0]) + self.offset)
else:
self.coords[:, 0] -= (np.min(self.coords[:, 0]) + self.offset)
self.nn = _compute_normals(self.coords, self.faces)
def save_geometry(self):
surf_path = op.join(self.data_path, "surf",
"%s.%s" % (self.hemi, self.surf))
nib.freesurfer.write_geometry(surf_path, self.coords, self.faces)
@property
def x(self):
return self.coords[:, 0]
@property
def y(self):
return self.coords[:, 1]
@property
def z(self):
return self.coords[:, 2]
def load_curvature(self):
"""Load in curvature values from the ?h.curv file."""
curv_path = op.join(self.data_path, "surf", "%s.curv" % self.hemi)
self.curv = nib.freesurfer.read_morph_data(curv_path)
self.bin_curv = np.array(self.curv > 0, np.int)
def load_label(self, name):
"""Load in a Freesurfer .label file.
Label files are just text files indicating the vertices included
in the label. Each Surface instance has a dictionary of labels, keyed
        by the name (which is taken from the file name if not given as an
        argument).
"""
label = nib.freesurfer.read_label(op.join(self.data_path, 'label',
'%s.%s.label' % (self.hemi, name)))
label_array = np.zeros(len(self.x), np.int)
label_array[label] = 1
try:
self.labels[name] = label_array
except AttributeError:
self.labels = {name: label_array}
def apply_xfm(self, mtx):
"""Apply an affine transformation matrix to the x,y,z vectors."""
self.coords = np.dot(np.c_[self.coords, np.ones(len(self.coords))],
mtx.T)[:, :3]
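# Illustrative usage sketch (added for clarity; not part of the original
# PySurfer API).  "fsaverage" is just an example subject: running this
# requires a FreeSurfer subjects directory containing it.
def _demo_surface(subject_id='fsaverage', hemi='lh', surf='inflated'):  # pragma: no cover
    geo = Surface(subject_id, hemi, surf)
    geo.load_geometry()      # populates coords, faces and vertex normals (nn)
    geo.load_curvature()     # populates curv and bin_curv
    return geo.coords.shape, geo.faces.shape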
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
    becomes large (>500). This is because np.cross() becomes less memory
    efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
    x and y must both be 2D arrays with three columns (one vector per row).
    One must have a single row, or both must have the same number of rows.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def _compute_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
zidx = np.where(size == 0)[0]
size[zidx] = 1.0 # prevent ugly divide-by-zero
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
nn /= size[:, np.newaxis]
return nn
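# Illustrative sketch (added for clarity): for a single triangle lying in the
# z = 0 plane, every vertex normal should point along +z.
def _demo_compute_normals():  # pragma: no cover
    rr = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    tris = np.array([[0, 1, 2]])
    return _compute_normals(rr, tris)  # approximately [[0, 0, 1]] for each vertex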
###############################################################################
# LOGGING (courtesy of mne-python)
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable MNE_LOG_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = "INFO"
elif isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
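# Illustrative sketch (added for clarity): temporarily silence the surfer
# logger for a noisy block of code, then restore the previous level.
def _demo_set_log_level():  # pragma: no cover
    old_level = set_log_level('ERROR', return_old_level=True)
    try:
        logger.info("this message is suppressed")
    finally:
        set_log_level(old_level)  # old_level is the previous integer level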
class WrapStdOut(object):
"""Ridiculous class to work around how doctest captures stdout"""
def __getattr__(self, name):
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
return getattr(sys.stdout, name)
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
        To suppress log outputs, use set_log_level('WARNING').
output_format : str
Format of the output messages. See the following for examples:
http://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
        but because doctest uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
def verbose(function):
"""Decorator to allow functions to override default log level
Do not call this function directly to set the global verbosity level,
instead use set_log_level().
Parameters (to decorated function)
----------------------------------
verbose : bool, str, int, or None
The level of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
None defaults to using the current log level [e.g., set using
mne.set_log_level()].
"""
arg_names = inspect.getargspec(function).args
# this wrap allows decorated functions to be pickled (e.g., for parallel)
@wraps(function)
def dec(*args, **kwargs):
# Check if the first arg is "self", if it has verbose, make it default
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
else:
default_level = None
verbose_level = kwargs.get('verbose', default_level)
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
ret = function(*args, **kwargs)
except:
set_log_level(old_level)
raise
set_log_level(old_level)
return ret
else:
return function(*args, **kwargs)
# set __wrapped__ attribute so ?? in IPython gets the right source
dec.__wrapped__ = function
return dec
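# Illustrative sketch (added for clarity): a function decorated with @verbose
# gains an optional ``verbose`` keyword that overrides the log level for the
# duration of the call only.
@verbose
def _demo_verbose(n, verbose=None):  # pragma: no cover
    logger.info("processing %d items" % n)
    return n * 2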
###############################################################################
# USEFUL FUNCTIONS
def find_closest_vertices(surface_coords, point_coords):
"""Return the vertices on a surface mesh closest to some given coordinates.
    The distance metric used is Euclidean distance.
Parameters
----------
surface_coords : numpy array
Array of coordinates on a surface mesh
point_coords : numpy array
Array of coordinates to map to vertices
Returns
-------
closest_vertices : numpy array
Array of mesh vertex ids
"""
point_coords = np.atleast_2d(point_coords)
return np.argmin(cdist(surface_coords, point_coords), axis=0)
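# Illustrative sketch (added for clarity): on this tiny made-up mesh the
# query point is closest to vertex 0.
def _demo_find_closest_vertices():  # pragma: no cover
    surface_coords = np.array([[0., 0., 0.], [10., 10., 10.]])
    return find_closest_vertices(surface_coords, [0.5, 0.5, 0.5])  # -> array([0])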
def tal_to_mni(coords):
"""Convert Talairach coords to MNI using the Lancaster transform.
Parameters
----------
coords : n x 3 numpy array
Array of Talairach coordinates
Returns
-------
mni_coords : n x 3 numpy array
Array of coordinates converted to MNI space
"""
coords = np.atleast_2d(coords)
xfm = np.array([[1.06860, -0.00396, 0.00826, 1.07816],
[0.00640, 1.05741, 0.08566, 1.16824],
[-0.01281, -0.08863, 1.10792, -4.17805],
[0.00000, 0.00000, 0.00000, 1.00000]])
mni_coords = np.dot(np.c_[coords, np.ones(coords.shape[0])], xfm.T)[:, :3]
return mni_coords
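# Illustrative sketch (added for clarity): converting one made-up Talairach
# coordinate to MNI space.
def _demo_tal_to_mni():  # pragma: no cover
    tal = [-44., -31., 27.]  # hypothetical Talairach coordinate
    return tal_to_mni(tal)   # 1 x 3 array of MNI coordinates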
def mesh_edges(faces):
"""Returns sparse matrix with edges as an adjacency matrix
Parameters
----------
faces : array of shape [n_triangles x 3]
The mesh faces
Returns
-------
edges : sparse matrix
The adjacency matrix
"""
npoints = np.max(faces) + 1
nfaces = len(faces)
a, b, c = faces.T
edges = sparse.coo_matrix((np.ones(nfaces), (a, b)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (b, c)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (c, a)),
shape=(npoints, npoints))
edges = edges + edges.T
edges = edges.tocoo()
return edges
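# Illustrative sketch (added for clarity): the adjacency matrix of a single
# triangle connects each pair of its three vertices.
def _demo_mesh_edges():  # pragma: no cover
    faces = np.array([[0, 1, 2]])
    return mesh_edges(faces).toarray()  # symmetric 3 x 3 matrix, zero diagonal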
def create_color_lut(cmap, n_colors=256):
"""Return a colormap suitable for setting as a Mayavi LUT.
Parameters
----------
cmap : string, list of colors, n x 3 or n x 4 array
Input colormap definition. This can be the name of a matplotlib
colormap, a list of valid matplotlib colors, or a suitable
mayavi LUT (possibly missing the alpha channel).
n_colors : int, optional
Number of colors in the resulting LUT. This is ignored if cmap
is a 2d array.
Returns
-------
lut : n_colors x 4 integer array
Color LUT suitable for passing to mayavi
"""
if isinstance(cmap, np.ndarray):
if np.ndim(cmap) == 2:
if cmap.shape[1] == 4:
                # This looks like a LUT that's ready to go
lut = cmap.astype(np.int)
elif cmap.shape[1] == 3:
# This looks like a LUT, but it's missing the alpha channel
alpha = np.ones(len(cmap), np.int) * 255
lut = np.c_[cmap, alpha]
return lut
# Otherwise, we're going to try and use matplotlib to create it
if cmap in dir(cm):
# This is probably a matplotlib colormap, so build from that
# The matplotlib colormaps are a superset of the mayavi colormaps
# except for one or two cases (i.e. blue-red, which is a crappy
# rainbow colormap and shouldn't be used for anything, although in
# its defense it's better than "Jet")
cmap = getattr(cm, cmap)
elif np.iterable(cmap):
# This looks like a list of colors? Let's try that.
colors = list(map(mpl.colors.colorConverter.to_rgb, cmap))
cmap = mpl.colors.LinearSegmentedColormap.from_list("_", colors)
else:
# If we get here, it's a bad input
raise ValueError("Input %s was not valid for making a lut" % cmap)
# Convert from a matplotlib colormap to a lut array
lut = (cmap(np.linspace(0, 1, n_colors)) * 255).astype(np.int)
return lut
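# Illustrative sketch (added for clarity): the same kind of LUT can be built
# from a matplotlib colormap name or from an explicit list of colors.
def _demo_create_color_lut():  # pragma: no cover
    lut_from_name = create_color_lut("Reds", n_colors=128)
    lut_from_list = create_color_lut(["white", "firebrick"], n_colors=128)
    return lut_from_name.shape, lut_from_list.shape  # both (128, 4)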
@verbose
def smoothing_matrix(vertices, adj_mat, smoothing_steps=20, verbose=None):
"""Create a smoothing matrix which can be used to interpolate data defined
    for a subset of vertices onto a mesh with an adjacency matrix given by
    adj_mat.
    If smoothing_steps is None, as many smoothing steps as needed are applied
    until the whole mesh is filled with non-zeros. Only use this option if
the vertices correspond to a subsampled version of the mesh.
Parameters
----------
vertices : 1d array
vertex indices
adj_mat : sparse matrix
N x N adjacency matrix of the full mesh
smoothing_steps : int or None
number of smoothing steps (Default: 20)
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
Returns
-------
smooth_mat : sparse matrix
smoothing matrix with size N x len(vertices)
"""
from scipy import sparse
logger.info("Updating smoothing matrix, be patient..")
e = adj_mat.copy()
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = vertices
smooth_mat = 1.0
n_iter = smoothing_steps if smoothing_steps is not None else 1000
for k in range(n_iter):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
scale_mat = sparse.dia_matrix((1 / data1[idx_use], 0),
shape=(len(idx_use), len(idx_use)))
smooth_mat = scale_mat * e_use[idx_use, :] * smooth_mat
logger.info("Smoothing matrix creation, step %d" % (k + 1))
if smoothing_steps is None and len(idx_use) >= n_vertices:
break
# Make sure the smoothing matrix has the right number of rows
# and is in COO format
smooth_mat = smooth_mat.tocoo()
smooth_mat = sparse.coo_matrix((smooth_mat.data,
(idx_use[smooth_mat.row],
smooth_mat.col)),
shape=(n_vertices,
len(vertices)))
return smooth_mat
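# Illustrative sketch (added for clarity): spread data known only at one seed
# vertex over the full (single-triangle) mesh.
def _demo_smoothing_matrix():  # pragma: no cover
    adj_mat = mesh_edges(np.array([[0, 1, 2]]))
    seed_vertices = np.array([0])
    smooth_mat = smoothing_matrix(seed_vertices, adj_mat, smoothing_steps=2)
    return smooth_mat.shape  # (n_vertices, len(seed_vertices)) == (3, 1)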
@verbose
def coord_to_label(subject_id, coord, label, hemi='lh', n_steps=30,
map_surface='white', coord_as_vert=False, verbose=None):
"""Create label from MNI coordinate
Parameters
----------
subject_id : string
Use if file is in register with subject's orig.mgz
coord : numpy array of size 3 | int
One coordinate in MNI space or the vertex index.
label : str
Label name
hemi : [lh, rh]
Hemisphere target
n_steps : int
Number of dilation iterations
map_surface : str
The surface name used to find the closest point
coord_as_vert : bool
whether the coords parameter should be interpreted as vertex ids
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
"""
geo = Surface(subject_id, hemi, map_surface)
geo.load_geometry()
if coord_as_vert:
coord = geo.coords[coord]
n_vertices = len(geo.coords)
adj_mat = mesh_edges(geo.faces)
foci_vtxs = find_closest_vertices(geo.coords, [coord])
data = np.zeros(n_vertices)
data[foci_vtxs] = 1.
smooth_mat = smoothing_matrix(np.arange(n_vertices), adj_mat, 1)
for _ in range(n_steps):
data = smooth_mat * data
idx = np.where(data.ravel() > 0)[0]
# Write label
label_fname = label + '-' + hemi + '.label'
logger.info("Saving label : %s" % label_fname)
    # use a context manager so the label file is always closed
    with open(label_fname, 'w') as f:
        f.write('#label at %s from subject %s\n' % (coord, subject_id))
        f.write('%d\n' % len(idx))
        for i in idx:
            x, y, z = geo.coords[i]
            f.write('%d %f %f %f 0.000000\n' % (i, x, y, z))
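# Illustrative usage sketch (added for clarity; not part of the original
# PySurfer API).  The subject name and coordinate are placeholders; running
# this needs a FreeSurfer subjects directory and writes "temporal-lh.label"
# to the current directory.
def _demo_coord_to_label():  # pragma: no cover
    coord_to_label('fsaverage', coord=[-44., -31., 27.], label='temporal',
                   hemi='lh', n_steps=10, map_surface='white')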
def _get_subjects_dir(subjects_dir=None, raise_error=True):
"""Get the subjects directory from parameter or environment variable
Parameters
----------
subjects_dir : str | None
The subjects directory.
raise_error : bool
If True, raise a ValueError if no value for SUBJECTS_DIR can be found
or the corresponding directory does not exist.
Returns
-------
subjects_dir : str
The subjects directory. If the subjects_dir input parameter is not
None, its value will be returned, otherwise it will be obtained from
the SUBJECTS_DIR environment variable.
"""
if subjects_dir is None:
subjects_dir = os.environ.get("SUBJECTS_DIR", "")
if not subjects_dir and raise_error:
raise ValueError('The subjects directory has to be specified '
'using the subjects_dir parameter or the '
'SUBJECTS_DIR environment variable.')
if raise_error and not os.path.exists(subjects_dir):
raise ValueError('The subjects directory %s does not exist.'
% subjects_dir)
return subjects_dir
def has_fsaverage(subjects_dir=None):
"""Determine whether the user has a usable fsaverage"""
fs_dir = op.join(_get_subjects_dir(subjects_dir, False), 'fsaverage')
if not op.isdir(fs_dir):
return False
if not op.isdir(op.join(fs_dir, 'surf')):
return False
return True
requires_fsaverage = np.testing.dec.skipif(not has_fsaverage(),
'Requires fsaverage subject data')
def has_ffmpeg():
"""Test whether the FFmpeg is available in a subprocess
Returns
-------
ffmpeg_exists : bool
True if FFmpeg can be successfully called, False otherwise.
"""
try:
subprocess.call(["ffmpeg"], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except OSError:
return False
def assert_ffmpeg_is_available():
"Raise a RuntimeError if FFmpeg is not in the PATH"
if not has_ffmpeg():
err = ("FFmpeg is not in the path and is needed for saving "
"movies. Install FFmpeg and try again. It can be "
"downlaoded from http://ffmpeg.org/download.html.")
raise RuntimeError(err)
requires_ffmpeg = np.testing.dec.skipif(not has_ffmpeg(), 'Requires FFmpeg')
def ffmpeg(dst, frame_path, framerate=24, codec='mpeg4', bitrate='1M'):
"""Run FFmpeg in a subprocess to convert an image sequence into a movie
Parameters
----------
dst : str
Destination path. If the extension is not ".mov" or ".avi", ".mov" is
added. If the file already exists it is overwritten.
frame_path : str
Path to the source frames (with a frame number field like '%04d').
framerate : float
Framerate of the movie (frames per second, default 24).
codec : str | None
Codec to use (default 'mpeg4'). If None, the codec argument is not
forwarded to ffmpeg, which preserves compatibility with very old
versions of ffmpeg
bitrate : str | float
Bitrate to use to encode movie. Can be specified as number (e.g.
64000) or string (e.g. '64k'). Default value is 1M
Notes
-----
    Requires FFmpeg to be in the path. FFmpeg can be downloaded from `here
<http://ffmpeg.org/download.html>`_. Stdout and stderr are written to the
logger. If the movie file is not created, a RuntimeError is raised.
"""
assert_ffmpeg_is_available()
# find target path
dst = os.path.expanduser(dst)
dst = os.path.abspath(dst)
root, ext = os.path.splitext(dst)
dirname = os.path.dirname(dst)
if ext not in ['.mov', '.avi']:
dst += '.mov'
if os.path.exists(dst):
os.remove(dst)
elif not os.path.exists(dirname):
os.mkdir(dirname)
frame_dir, frame_fmt = os.path.split(frame_path)
# make the movie
cmd = ['ffmpeg', '-i', frame_fmt, '-r', str(framerate), '-b', str(bitrate)]
if codec is not None:
cmd += ['-c', codec]
cmd += [dst]
logger.info("Running FFmpeg with command: %s", ' '.join(cmd))
sp = subprocess.Popen(cmd, cwd=frame_dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# log stdout and stderr
stdout, stderr = sp.communicate()
std_info = os.linesep.join(("FFmpeg stdout", '=' * 25, stdout))
logger.info(std_info)
if stderr.strip():
err_info = os.linesep.join(("FFmpeg stderr", '=' * 27, stderr))
logger.error(err_info)
# check that movie file is created
if not os.path.exists(dst):
err = ("FFmpeg failed, no file created; see log for more more "
"information.")
raise RuntimeError(err)
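# Illustrative usage sketch (added for clarity; not part of the original
# module): stitch a hypothetical frame sequence frames/frame_0001.png, ...
# into a QuickTime movie.  The paths are placeholders.
def _demo_ffmpeg():  # pragma: no cover
    ffmpeg("brain_movie.mov", "frames/frame_%04d.png",
           framerate=24, codec='mpeg4', bitrate='1M')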
| bsd-3-clause |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py | 70 | 9051 | from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue ([email protected]) and the Agg backend by John
Hunter ([email protected])
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license( PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWxAgg(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
if bbox is None:
# agg => rgb -> image
return image
else:
# agg => rgb -> image => bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))
def _py_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image => bitmap
return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
else:
# agg => rgb -> image => bitmap => clipped bitmap
return _clipped_image_as_bitmap(
_py_convert_agg_to_wx_image(agg, None),
bbox)
def _clipped_image_as_bitmap(image, bbox):
"""
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromImage(image)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(image.GetHeight() - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
def _use_accelerator(state):
"""
Enable or disable the WXAgg accelerator, if it is present and is also
compatible with whatever version of wxPython is in use.
"""
global _convert_agg_to_wx_image
global _convert_agg_to_wx_bitmap
if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
# wxPython < 2.8, so use the C++ accelerator or the Python routines
if state and _wxagg is not None:
_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
else:
_convert_agg_to_wx_image = _py_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
else:
# wxPython >= 2.8, so use the accelerated Python routines
_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator
try:
import _wxagg
except ImportError:
_wxagg = None
# if it's present, use it
_use_accelerator(True)
| agpl-3.0 |
kevin-coder/tensorflow-fork | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 25 | 7883 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.cached_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
aricooperman/Jzipline | zipline/utils/tradingcalendar_lse.py | 35 | 5047 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# References:
# http://www.londonstockexchange.com
# /about-the-exchange/company-overview/business-days/business-days.htm
# http://en.wikipedia.org/wiki/Bank_holiday
# http://www.adviceguide.org.uk/england/work_e/work_time_off_work_e/
# bank_and_public_holidays.htm
import pytz
import pandas as pd
from datetime import datetime
from dateutil import rrule
from zipline.utils.tradingcalendar import end
start = datetime(2002, 1, 1, tzinfo=pytz.utc)
non_trading_rules = []
# Weekends
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
# New Year's Day
new_year = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
# If New Year's Day falls on a Saturday then Monday the 3rd is a holiday
# If New Year's Day falls on a Sunday then Monday the 2nd is a holiday
weekend_new_year = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
bymonthday=[2, 3],
byweekday=(rrule.MO),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_year)
non_trading_rules.append(weekend_new_year)
# Good Friday
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
# Easter Monday
easter_monday = rrule.rrule(
rrule.DAILY,
byeaster=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(easter_monday)
# Early May Bank Holiday (1st Monday in May)
may_bank = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(may_bank)
# Spring Bank Holiday (Last Monday in May)
spring_bank = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=datetime(2003, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(spring_bank)
# Summer Bank Holiday (Last Monday in August)
summer_bank = rrule.rrule(
rrule.MONTHLY,
bymonth=8,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(summer_bank)
# Christmas Day
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
# If Christmas Day falls on a Saturday then Monday the 27th is a holiday
# If Christmas Day falls on a Sunday then Tuesday the 27th is a holiday
weekend_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=27,
byweekday=(rrule.MO, rrule.TU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
non_trading_rules.append(weekend_christmas)
# Boxing Day
boxing_day = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
cache=True,
dtstart=start,
until=end
)
# If Boxing Day falls on a Saturday then Monday the 28th is a holiday
# If Boxing Day falls on a Sunday then Tuesday the 28th is a holiday
weekend_boxing_day = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=28,
byweekday=(rrule.MO, rrule.TU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(boxing_day)
non_trading_rules.append(weekend_boxing_day)
non_trading_ruleset = rrule.rruleset()
# In 2002 the May bank holiday was moved to the 4th of June to follow the
# Queen's Golden Jubilee
non_trading_ruleset.exdate(datetime(2002, 9, 27, tzinfo=pytz.utc))
non_trading_ruleset.rdate(datetime(2002, 6, 3, tzinfo=pytz.utc))
non_trading_ruleset.rdate(datetime(2002, 6, 4, tzinfo=pytz.utc))
# TODO: not sure why Feb 18 2008 is not available in the yahoo data
non_trading_ruleset.rdate(datetime(2008, 2, 18, tzinfo=pytz.utc))
# In 2011 The Friday before Mayday was the Royal Wedding
non_trading_ruleset.rdate(datetime(2011, 4, 29, tzinfo=pytz.utc))
# In 2012 the May bank holiday was moved to the 4th of June to precede the
# Queen's Diamond Jubilee
non_trading_ruleset.exdate(datetime(2012, 5, 28, tzinfo=pytz.utc))
non_trading_ruleset.rdate(datetime(2012, 6, 4, tzinfo=pytz.utc))
non_trading_ruleset.rdate(datetime(2012, 6, 5, tzinfo=pytz.utc))
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
non_trading_day_index = pd.DatetimeIndex(sorted(non_trading_days))
business_days = pd.DatetimeIndex(start=start, end=end,
freq=pd.datetools.BDay())
trading_days = business_days.difference(non_trading_day_index)
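# Illustrative helper (added for clarity; not part of the original module):
# the computed DatetimeIndex is typically queried by membership, e.g. to ask
# whether the LSE traded on a given day.  The helper name is hypothetical.
def _is_lse_trading_day(dt):
    """Return True if `dt` (a timezone-aware datetime) is an LSE trading day."""
    return pd.Timestamp(dt) in trading_days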
| apache-2.0 |
MrTheodor/espressopp | contrib/mpi4py/mpi4py-2.0.0/demo/mandelbrot/mandelbrot-master.py | 11 | 1466 | from mpi4py import MPI
import numpy as np
x1 = -2.0
x2 = 1.0
y1 = -1.0
y2 = 1.0
w = 600
h = 400
maxit = 255
import os
dirname = os.path.abspath(os.path.dirname(__file__))
executable = os.path.join(dirname, 'mandelbrot-worker.exe')
# spawn worker
worker = MPI.COMM_SELF.Spawn(executable, maxprocs=7)
size = worker.Get_remote_size()
# send parameters
rmsg = np.array([x1, x2, y1, y2], dtype='f')
imsg = np.array([w, h, maxit], dtype='i')
worker.Bcast([rmsg, MPI.REAL], root=MPI.ROOT)
worker.Bcast([imsg, MPI.INTEGER], root=MPI.ROOT)
# gather results
counts = np.empty(size, dtype='i')
indices = np.empty(h, dtype='i')
cdata = np.empty([h, w], dtype='i')
worker.Gather(sendbuf=None,
recvbuf=[counts, MPI.INTEGER],
root=MPI.ROOT)
worker.Gatherv(sendbuf=None,
recvbuf=[indices, (counts, None), MPI.INTEGER],
root=MPI.ROOT)
worker.Gatherv(sendbuf=None,
recvbuf=[cdata, (counts * w, None), MPI.INTEGER],
root=MPI.ROOT)
# disconnect worker
worker.Disconnect()
# reconstruct full result
M = np.zeros([h, w], dtype='i')
M[indices, :] = cdata
# eye candy (requires matplotlib)
try:
from matplotlib import pyplot as plt
plt.imshow(M, aspect='equal')
plt.spectral()
try:
import signal
def action(*args): raise SystemExit
signal.signal(signal.SIGALRM, action)
signal.alarm(2)
except:
pass
plt.show()
except:
pass
| gpl-3.0 |
jlegendary/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
marscher/bhmm | bhmm/_external/sklearn/utils.py | 3 | 12129 | '''
Created on 07.07.2015
@author: marscher
'''
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
import six
from inspect import getargspec
class NotFittedError(Exception):
pass
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
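# Illustrative usage sketch (added for clarity, not part of the original module);
# `_DummyEstimator` is a hypothetical class used only for this example:
# >>> class _DummyEstimator(object):
# ...     def fit(self, X):
# ...         self.coef_ = 0.0
# ...         return self
# >>> est = _DummyEstimator()
# >>> check_is_fitted(est, "coef_")            # raises NotFittedError
# >>> check_is_fitted(est.fit(None), "coef_")  # passes silently once coef_ exists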
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
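# Illustrative usage sketch (not part of the original module): a COO matrix is
# converted to the first allowed format, CSR in this hypothetical call.
# >>> m = sp.coo_matrix(np.eye(3))
# >>> _ensure_sparse_format(m, accept_sparse=['csr'], dtype=None, order=None,
# ...                       copy=False, force_all_finite=True).format
# 'csr'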
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
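# Illustrative usage sketch (not part of the original module):
# >>> check_random_state(None) is np.random.mtrand._rand   # the global RandomState
# True
# >>> rng = check_random_state(42)                          # a fresh, seeded RandomState
# >>> check_random_state(rng) is rng                        # instances pass through
# True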
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
if sp.issparse(array):
if dtype_numeric:
dtype = None
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
if dtype_numeric:
if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array | lgpl-3.0 |
ttadano/alamode | tools/plotband.py | 1 | 9490 | #!/usr/bin/env python
#
# plotband.py
#
# Simple script to visualize phonon dispersion relations
#
# Copyright (c) 2014 Terumasa Tadano
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
import numpy as np
import optparse
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
try:
mpl.use("Qt5agg")
except:
pass
import matplotlib.pyplot as plt
# parser options
usage = "usage: %prog [options] file1.bands file2.bands ... "
parser = optparse.OptionParser(usage=usage)
parser.add_option("--nokey", action="store_false", dest="print_key", default=True,
help="don't print the key in the figure")
parser.add_option("-u", "--unit", action="store", type="string", dest="unitname", default="kayser",
help="print the band dispersion in units of UNIT. Available options are kayser, meV, and THz", metavar="UNIT")
parser.add_option("--emin", action="store", type="float", dest="emin",
help="minimum value of the energy axis")
parser.add_option("--emax", action="store", type="float", dest="emax",
help="maximum value of the energy axis")
parser.add_option("--normalize", action="store_true", dest="normalize_xaxis", default=False,
help="normalize the x axis to unity.")
# font styles
mpl.rc('font', **{'family': 'Times New Roman', 'sans-serif': ['Helvetica']})
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=16)
mpl.rc('axes', labelsize=16)
mpl.rc('lines', linewidth=1.5)
mpl.rc('legend', fontsize='small')
# line colors and styles
color = ['b', 'g', 'r', 'm', 'k', 'c', 'y', 'r']
lsty = ['-', '-', '-', '-', '--', '--', '--', '--']
def get_kpath_and_kval(file_in):
ftmp = open(file_in, 'r')
kpath = ftmp.readline().rstrip('\n').split()
kval = ftmp.readline().rstrip('\n').split()
ftmp.close()
if kpath[0] == '#' and kval[0] == '#':
kval_float = [float(val) for val in kval[1:]]
kpath_list = []
for i in range(len(kpath[1:])):
if kpath[i + 1] == 'G':
kpath_list.append('$\Gamma$')
else:
kpath_list.append("$\mathrm{%s}$" % kpath[i + 1])
return kpath_list, kval_float
else:
return [], []
def change_scale(array, str_scale):
str_tmp = str_scale.lower()
if str_tmp == 'kayser':
print("Band structure will be shown in units of cm^{-1}")
return array
elif str_tmp == 'mev':
print("Band structure will be shown in units of meV")
kayser_to_mev = 0.0299792458 * 1.0e+12 * \
6.62606896e-34 / 1.602176565e-19 * 1000
for i in range(len(array)):
for j in range(len(array[i])):
for k in range(1, len(array[i][j])):
array[i][j][k] *= kayser_to_mev
return array
elif str_tmp == 'thz':
print("Band structure will be shown in units of THz")
kayser_to_thz = 0.0299792458
for i in range(len(array)):
for j in range(len(array[i])):
for k in range(1, len(array[i][j])):
array[i][j][k] *= kayser_to_thz
return array
else:
print("Unrecognizable option for --unit %s" % str_scale)
print("Band structure will be shown in units of cm^{-1}")
return array
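# Illustrative sanity check (added comment, not part of the original script):
# with the factors above, 1 cm^-1 corresponds to roughly 0.123984 meV and to
# 0.0299792458 THz. Column 0 holds the k-path coordinate and is left untouched:
# >>> change_scale([[[0.0, 100.0]]], 'meV')[0][0][1]   # ~ 12.398
# >>> change_scale([[[0.0, 100.0]]], 'THz')[0][0][1]   # ~ 2.998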
def normalize_to_unity(array, array_axis):
for i in range(len(array)):
max_val = array[i][-1][0]
factor_normalize = 1.0 / max_val
for j in range(len(array[i])):
array[i][j][0] *= factor_normalize
max_val = array_axis[-1]
factor_normalize = 1.0 / max_val
for i in range(len(array_axis)):
array_axis[i] *= factor_normalize
return array, array_axis
def get_xy_minmax(array):
xmin, xmax, ymin, ymax = [0, 0, 0, 0]
for i in range(len(array)):
xtmp = array[i][-1][0]
xmax = max(xmax, xtmp)
for i in range(len(array)):
for j in range(len(array[i])):
for k in range(1, len(array[i][j])):
ytmp = array[i][j][k]
ymin = min(ymin, ytmp)
ymax = max(ymax, ytmp)
return xmin, xmax, ymin, ymax
def gridspec_setup(data_merged, xtickslabels, xticksvars):
xmaxs = []
xmins = []
xticks_grids = []
xticklabels_grids = []
xticklabels_tmp = []
xticks_tmp = []
for i in range(len(xtickslabels)):
if i == 0:
xmins.append(xticksvars[0])
else:
if xticksvars[i] == xticksvars[i-1]:
xmaxs.append(xticksvars[i - 1])
xmins.append(xticksvars[i])
xticks_grids.append(xticks_tmp)
xticklabels_grids.append(xticklabels_tmp)
xticklabels_tmp = []
xticks_tmp = []
xticklabels_tmp.append(xtickslabels[i])
xticks_tmp.append(xticksvars[i])
xticks_grids.append(xticks_tmp)
xticklabels_grids.append(xticklabels_tmp)
xmaxs.append(xticksvars[-1])
naxes = len(xticks_grids)
nfiles = len(data_merged)
data_all_axes = []
for i in range(naxes):
data_ax = []
xmin_ax = xmins[i]
xmax_ax = xmaxs[i]
for j in range(nfiles):
kval = np.array(data_merged[j][0:, 0])
ix_xmin_arr = np.where(kval <= xmin_ax)
ix_xmax_arr = np.where(kval >= xmax_ax)
if len(ix_xmin_arr[0]) > 0:
ix_xmin = int(ix_xmin_arr[0][-1])
else:
ix_xmin = 0
if len(ix_xmax_arr[0]) > 0:
ix_xmax = int(ix_xmax_arr[0][0])
else:
ix_xmax = -2
data_ax.append(data_merged[j][ix_xmin:(ix_xmax+1), :])
data_all_axes.append(data_ax)
return naxes, xticks_grids, xticklabels_grids, xmins, xmaxs, data_all_axes
def preprocess_data(files, unitname, normalize_xaxis):
xtickslabels, xticksvars = get_kpath_and_kval(files[0])
data_merged = []
for file in files:
data_tmp = np.loadtxt(file, dtype=float)
data_merged.append(data_tmp)
data_merged = change_scale(data_merged, unitname)
if normalize_xaxis:
data_merged, xticksvars = normalize_to_unity(data_merged, xticksvars)
xmin, xmax, ymin, ymax = get_xy_minmax(data_merged)
if options.emin is None and options.emax is None:
factor = 1.05
ymin *= factor
ymax *= factor
else:
if options.emin is not None:
ymin = options.emin
if options.emax is not None:
ymax = options.emax
if ymin > ymax:
print("Warning: emin > emax")
naxes, xticks_grids, xticklabels_grids, xmins, xmaxs, data_merged_grids \
= gridspec_setup(data_merged, xtickslabels, xticksvars)
return naxes, xticks_grids, xticklabels_grids, \
xmins, xmaxs, ymin, ymax, data_merged_grids
def run_plot(nax, xticks_ax, xticklabels_ax, xmin_ax, xmax_ax, ymin, ymax, data_merged_ax):
fig = plt.figure()
width_ratios = []
for xmin, xmax in zip(xmin_ax, xmax_ax):
width_ratios.append(xmax - xmin)
gs = GridSpec(nrows=1, ncols=nax, width_ratios=width_ratios)
gs.update(wspace=0.1)
for iax in range(nax):
ax = plt.subplot(gs[iax])
for i in range(len(data_merged_ax[iax])):
if len(data_merged_ax[iax][i]) > 0:
ax.plot(data_merged_ax[iax][i][0:, 0], data_merged_ax[iax][i][0:, 1],
linestyle=lsty[i], color=color[i], label=files[i])
for j in range(2, len(data_merged_ax[iax][i][0][0:])):
ax.plot(data_merged_ax[iax][i][0:, 0], data_merged_ax[iax][i][0:, j],
linestyle=lsty[i], color=color[i])
if iax == 0:
if options.unitname.lower() == "mev":
ax.set_ylabel("Frequency (meV)", labelpad=20)
elif options.unitname.lower() == "thz":
ax.set_ylabel("Frequency (THz)", labelpad=20)
else:
ax.set_ylabel("Frequency (cm${}^{-1}$)", labelpad=10)
else:
ax.set_yticklabels([])
ax.set_yticks([])
plt.axis([xmin_ax[iax], xmax_ax[iax], ymin, ymax])
ax.set_xticks(xticks_ax[iax])
ax.set_xticklabels(xticklabels_ax[iax])
ax.xaxis.grid(True, linestyle='-')
if options.print_key and iax == 0:
ax.legend(loc='best', prop={'size': 10})
plt.show()
if __name__ == '__main__':
'''
Simple script for visualizing phonon dispersion relations.
Usage:
$ python plotband.py [options] file1.bands file2.bands ...
For details of available options, please type
$ python plotband.py -h
'''
options, args = parser.parse_args()
files = args[0:]
nfiles = len(files)
if nfiles == 0:
print("Usage: plotband.py [options] file1.bands file2.bands ...")
print("For details of available options, please type\n$ python plotband.py -h")
exit(1)
else:
print("Number of files = %d" % nfiles)
nax, xticks_ax, xticklabels_ax, xmin_ax, xmax_ax, ymin, ymax, \
data_merged_ax = preprocess_data(
files, options.unitname, options.normalize_xaxis)
run_plot(nax, xticks_ax, xticklabels_ax,
xmin_ax, xmax_ax, ymin, ymax, data_merged_ax)
| mit |
rajul/mne-python | examples/visualization/plot_topo_channel_epochs_image.py | 22 | 1861 | """
============================================================
Visualize channel over epochs as images in sensor topography
============================================================
This will produce what is sometimes called event related
potential / field (ERP/ERF) images.
One sensor topography plot is produced with the evoked field images from
the selected channels.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event related fields images
layout = mne.find_layout(epochs.info, 'meg') # use full layout
title = 'ERF images - MNE sample data'
mne.viz.plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title)
plt.show()
| bsd-3-clause |
tdhopper/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
0x0all/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_shareddense.py | 7 | 17280 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_shareddense.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_shareddense.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
l3f = layers.FlattenLayer(l3s)
l4a = layers.DenseLayer(l3f, n_outputs=512, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
j4 = layers.MultiRotMergeLayer(l4, num_views=4) # 2) # merge convolutional parts
l5a = layers.DenseLayer(j4, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l5 = layers.FeatureMaxPoolingLayer(l5a, pool_size=2, feature_dim=1, implementation='reshape')
l6a = layers.DenseLayer(l5, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l6a) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for the test set, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for the set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
rizac/gfzreport | gfzreport/sphinxbuild/map/__init__.py | 2 | 43603 | '''
This module implements the function `plotmap` which plots scattered points on a map
retrieved using ArgGIS Server REST API. The function is highly customizable and is basically a
wrapper around the `Basemap` library (for the map background)
plus matplotlib utilities (for plotting points, shapes, labels and legend)
Created on Mar 10, 2016
@author: riccardo
'''
import numpy as np
import re
from itertools import izip, chain
from urllib2 import URLError, HTTPError
import socket
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from mpl_toolkits.basemap import Basemap
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
def parse_margins(obj, parsefunc=lambda margins: [float(val) for val in margins]):
"""Parses obj returning a 4 element numpy array denoting the top, right, bottom and left
values. This function first converts obj to a 4 element list L, and then
calls `parsefunc`, which by default converts all L values into float
:param obj: either None, a number, a list of numbers (allowed lengths: 1 to 4),
a comma/semicolon/spaces separated string (e.g. "4deg 0.0", "1, 1.2", "2km,4deg", "1 ; 2")
:param parsefunc: a function to be applied to obj converted to list. By default, returns
float(v) for any v in L
:return: a 4 element numpy array of floats denoting the top, right, bottom, left values of
the margins. The idea is the same as css margins, as depicted in the table below.
:Examples:
Writing f for `parsefunc`, then:
============= =========================
obj is returns
============= =========================
None [0, 0, 0, 0]
------------- -------------------------
string the list obtained after
splitting string via
regexp where comma,
semicolon and spaces
are valid separators
------------- -------------------------
x or [x] parsefunc([x, x, x, x])
------------- -------------------------
[x, y] parsefunc([x, y ,x, y])
------------- -------------------------
[x, y, z] parsefunc([x, y, z, y])
------------- -------------------------
[x, y, z, t] parsefunc([x, y, z, t])
============= =========================
"""
if obj is None:
margins = [0] * 4
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
# is an iterable not string. Note the if above is py2 py3 compatible
margins = list(obj)
else:
try:
margins = [float(obj)] * 4
except (TypeError, ValueError):
margins = re.compile("(?:\\s*,\\s*|\\s*;\\s*|\\s+)").split(obj)
if len(margins) == 1:
margins *= 4
elif len(margins) == 2:
margins *= 2
elif len(margins) == 3:
margins.append(margins[1])
elif len(margins) != 4:
raise ValueError("unable to parse margins on invalid value '%s'" % obj)
return np.asarray(parsefunc(margins) if hasattr(parsefunc, "__call__") else margins)
# return margins
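# Illustrative usage sketch (not part of the original module), with the default
# float `parsefunc`; the result is always ordered [top, right, bottom, left]:
# >>> parse_margins(1)          # -> array([ 1., 1., 1., 1.])
# >>> parse_margins("1, 2")     # -> array([ 1., 2., 1., 2.])
# >>> parse_margins([1, 2, 3])  # -> array([ 1., 2., 3., 2.])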
def parse_distance(dist, lat_0=None):
"""Returns the distance in degrees. If dist is in km or m, and lat_0 is not None,
returns w2lon(dist, lat_0), else h2lat(dist). A dist of None defaults to 0
:param dist: float, int, None or string. If a string with a unit ('deg', 'km' or 'm'), see above
"""
try:
return 0 if dist is None else float(dist)
except ValueError:
if dist[-3:].lower() == 'deg':
return float(dist[:-3])
elif dist[-2:] == 'km':
dst = 1000 * float(dist[:-2])
elif dist[-1:] == 'm':
dst = float(dist[:-1])
else:
raise
return w2lon(dst, lat_0) if lat_0 is not None else h2lat(dst)
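# Illustrative usage sketch (not part of the original module): plain numbers and
# 'deg' strings are already degrees; 'km'/'m' are converted via h2lat, or via
# w2lon when lat_0 is given:
# >>> parse_distance('2deg')   # -> 2.0
# >>> parse_distance(0.5)      # -> 0.5
# >>> parse_distance('1km')    # -> ~0.008993 (degrees of latitude)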
def get_lon0_lat0(min_lons, min_lats, max_lons, max_lats):
""" Calculates lat_0, lon_0, i.e., the mid point of the bounding box denoted by the
arguments
:param min_lons: the minimum of longitudes
:param min_lats: the minimum of latitudes
:param max_lons: the maximum of longitudes
:param max_lats: the maximum of latitudes
:return: the 2-element tuple denoting the mid point lon_0, lat_0
"""
lat_0 = max_lats / 2. + min_lats / 2.
lon_0 = max_lons / 2. + min_lons / 2.
if lon_0 > 180: # FIXME: necessary?? see self.get_normalized... above
lon_0 -= 360
return lon_0, lat_0
def getbounds(min_lon, min_lat, max_lon, max_lat, margins):
"""Calculates the bounds given the bounding box identified by the arguments and
given optional margins
:param min_lon: the minimum longitude (numeric, scalar)
:param min_lat: the minimum latitude (numeric, scalar)
:param max_lon: the maximum longitude (numeric, scalar)
:param max_lat: the maximum latitude (numeric, scalar)
:param margins: the margins as a css-like string (with units 'deg', 'km' or 'm'), or as
a 1 to 4 element array of numeric values (in that case denoting degrees).
As in css, a 4 element array denotes the [top, right, bottom, left] values.
None defaults to [0, 0, 0, 0].
:return: the 6-element tuple denoting lon_0, lat_0, min_lon, min_lat, max_lon, max_lat,
where min_lon, min_lat, max_lon, max_lat are the new bounds and lon_0 and lat_0 are
their midpoints (x and y, respectively)
"""
def parsefunc(mrgns):
"""parses mrgns as array of strings into array of floats
"""
return parse_distance(mrgns[0]), parse_distance(mrgns[1], max_lat), \
parse_distance(mrgns[2]), parse_distance(mrgns[3], min_lat)
top, right, btm, left = parse_margins(margins, parsefunc)
min_lon, min_lat, max_lon, max_lat = min_lon-left, min_lat-btm, max_lon+right, max_lat+top
if min_lon == max_lon:
min_lon -= 10 # in degrees
max_lon += 10 # in degrees
if min_lat == max_lat:
min_lat -= 10 # in degrees
max_lat += 10 # in degrees
# minima must be within bounds:
min_lat = max(-90, min_lat)
max_lat = min(90, max_lat)
min_lon = max(-180, min_lon)
max_lon = min(180, max_lon)
lon_0, lat_0 = get_lon0_lat0(min_lon, min_lat, max_lon, max_lat)
return lon_0, lat_0, min_lon, min_lat, max_lon, max_lat
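# Illustrative usage sketch (not part of the original module): a degenerate
# single-point bounding box with no margins is padded by 10 degrees per side,
# and the midpoint (lon_0, lat_0) is returned first:
# >>> getbounds(10, 45, 10, 45, None)
# (10.0, 45.0, 0.0, 35.0, 20.0, 55.0)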
# static constant converter (degree to meters and viceversa) for latitudes
DEG2M_LAT = 2 * np.pi * 6371 * 1000 / 360
def lat2h(distance_in_degrees):
"""converts latitude distance from degrees to height in meters
:param distance_in_degrees: a distance (python scalar or numpy array) along the great circle
expressed in degrees"""
deg2m_lat = DEG2M_LAT # 2 * np.pi * 6371 * 1000 / 360
return distance_in_degrees * deg2m_lat
def h2lat(distance_in_meters):
"""converts latitude distance from height in meters to degrees
:param distance_in_meters: a distance (python scalar or numpy array) along the great circle
expressed in meters"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
return distance_in_meters / deg2m_lat
def lon2w(distance_in_degrees, lat_0):
"""converts longitude distance from degrees to width in meters
:param distance_in_degrees: a distance (python scalar or numpy array)
along the lat_0 circle expressed in degrees
:param lat_0: if missing or None, defaults to the internal lat_0, which is set as the mean
of all points passed to this object. Otherwise, expresses the latitude of the circle along
which the lon2w(distance_in_degrees) must be converted to meters"""
deg2m_lat = DEG2M_LAT
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_degrees * deg2m_lon
def w2lon(distance_in_meters, lat_0):
"""converts longitude distance from width in meters to degrees
:param distance_in_meters: a distance (python scalar or numpy array)
along the lat_0 circle expressed in meters
:param lat_0: if missing or None, defaults to the internal lat_0, which is set as the mean
of all points passed to this object. Otherwise, expresses the latitude (in degrees) of the
circle along which w2lon(distance_in_meters) must be converted to degrees"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_meters / deg2m_lon
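# Illustrative sanity checks for the converters above (not part of the original
# module): one degree of latitude is ~111.195 km, and the longitude converters
# are inverses of each other at a fixed latitude:
# >>> lat2h(1.0)                      # -> ~111194.93 (meters)
# >>> h2lat(lat2h(2.5))               # -> 2.5
# >>> w2lon(lon2w(1.0, 45.0), 45.0)   # -> 1.0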
class MapHandler(object):
"""
Class handling bounds of a map given points (lons and lats)
"""
def __init__(self, lons, lats, map_margins):
"""Initializes a new MapHandler. If figure here is None, you **MUST**
call self.set_fig(fig) to calculate bounds and other stuff
when you have a ready figure"""
self.lons = lons if len(lons) else [0] # FIXME: use numpy arrays!!
self.lats = lats if len(lats) else [0]
self.max_lons, self.min_lons = max(self.lons), min(self.lons)
self.max_lats, self.min_lats = max(self.lats), min(self.lats)
self.lon_0, self.lat_0, self.llcrnrlon, self.llcrnrlat, self.urcrnrlon, self.urcrnrlat = \
getbounds(self.min_lons, self.min_lats, self.max_lons, self.max_lats, map_margins)
def _get_map_dims(self): # , fig_size_in_inches, colorbar=False):
"""Returns the map dimension width, height, in meters"""
max_lons, min_lons = self.urcrnrlon, self.llcrnrlon
max_lats, min_lats = self.urcrnrlat, self.llcrnrlat
height = lat2h(max_lats - min_lats)
width = lon2w(max_lons - min_lons, self.lat_0)
return width, height
def get_parallels(self, max_labels_count=8):
width, height = self._get_map_dims()
lat_0 = self.lat_0
N1 = int(np.ceil(height / max(width, height) * max_labels_count))
parallels = MapHandler._linspace(lat_0 - h2lat(height / 2),
lat_0 + h2lat(height / 2), N1)
return parallels
def get_meridians(self, max_labels_count=8):
width, height = self._get_map_dims()
lon_0 = self.lon_0
lat_0 = self.lat_0
N2 = int(np.ceil(width / max(width, height) * max_labels_count))
meridians = MapHandler._linspace(lon_0 - w2lon(width / 2, lat_0),
lon_0 + w2lon(width / 2, lat_0), N2)
meridians[meridians > 180] -= 360
return meridians
@staticmethod
def _linspace(val1, val2, N):
"""
returns around N 'nice' values between val1 and val2. Copied from obspy.plot_map
"""
dval = val2 - val1
round_pos = int(round(-np.log10(1. * dval / N)))
# Fake negative rounding as not supported by future as of now.
if round_pos < 0:
factor = 10 ** (abs(round_pos))
delta = round(2. * dval / N / factor) * factor / 2
else:
delta = round(2. * dval / N, round_pos) / 2
new_val1 = np.ceil(val1 / delta) * delta
new_val2 = np.floor(val2 / delta) * delta
N = (new_val2 - new_val1) / delta + 1
return np.linspace(new_val1, new_val2, N)
def _normalize(obj, size=None, dtype=None):
""""Casts" obj to a numpy array of the given optional size and optional dtype, and returns it.
If size is not None, the array must have length size. If not, and has length 1, it will be
resized to the specified size. Otherwise a ValueError is raised
If size is None, no resize will be in place and the array is returend as it is
Note: obj=None will be converted to the array [None], apparently in the current version of numpy
this wouldn't be the default (see argument ndmin=1)
:return an numpy array resulting to the coinversion of obj into array
:Examples:
"""
x = np.array(obj, ndmin=1) if dtype is None else np.array(obj, ndmin=1, dtype=dtype)
if size is None:
return np.array([]) if obj is None else x # if obj is None x is [None], return [] instead
try:
if len(x) == 1:
x = np.resize(x, size)
elif len(x) != size:
raise ValueError("invalid array length: %d. Expected %d" % (len(x), size))
except (ValueError, TypeError) as _err:
raise ValueError(str(_err))
return x
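# Illustrative examples for _normalize (added here since the ":Examples:"
# section of its docstring is empty in the original):
# >>> _normalize(5, size=3)       # scalar broadcast -> array([5, 5, 5])
# >>> _normalize([1, 2, 3])       # no size given -> array([1, 2, 3]) unchanged
# >>> _normalize(None)            # None without size -> array([], dtype=float64)
# >>> _normalize([1, 2], size=3)  # raises ValueError (length mismatch)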
def torgba(html_str):
"""Converts html_str into a tuple of rgba colors all in [0, 1]
Curiously, matplotlib color functions do not provide this functionality for
'#RGBA' color formats
:param html_str: a valid html string in hexadecimal format.
Can have length 4, 7 or 9 such as #F1a, #fa98e3, #fc456a09
:return: a rgba vector, i.e. a 4-element numpy array of values in [0,1] denoting `html_str`
:raise: ValueError if html_str is invalid
"""
if len(html_str) not in (4, 7, 9) or not html_str[0] == '#':
raise ValueError("'%s' invalid html string" % html_str)
elif len(html_str) == 4:
rgb = [html_str[i:i+1]*2 for i in xrange(1, len(html_str))]
else:
rgb = [html_str[i:i+2] for i in xrange(1, len(html_str), 2)]
if len(rgb) == 3:
rgb += ['FF']
return np.true_divide(np.array([int(r, 16) for r in rgb]), 255)
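# Illustrative usage sketch (not part of the original module): both the short
# and long hexadecimal forms are accepted, with an optional alpha byte:
# >>> torgba('#F00')        # -> array([ 1., 0., 0., 1.])
# >>> torgba('#ff440080')   # -> ~[1., 0.267, 0., 0.502] (half transparent)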
def _shapeargs(lons, lats, labels, sizes, colors, markers, legend_labels):
lons = _normalize(lons, dtype=float) # basically: convert to float array if scalar (size=0)
lats = _normalize(lats, dtype=float) # basically: convert to float array if scalar (size=0)
if len(lons) != len(lats):
raise ValueError('mismatch in lengths: lons (%d) and lats (%d)' % (len(lons), len(lats)))
leng = len(lons)
labels = _normalize(labels, size=leng)
colors = _normalize(colors, size=leng)
markers = _normalize(markers, size=leng)
legend_labels = _normalize(legend_labels, size=leng)
# colors[np.isnan(colors) | (colors <= 0)] = 1.0 # nan colors default to 1 (black?)
sizes = _normalize(sizes, size=leng, dtype=float)
valid_points = np.logical_not(np.isnan(lons) | np.isnan(lats) | (sizes <= 0))
# return all points whose corresponding numeric values are not nan:
return (lons[valid_points],
lats[valid_points],
labels[valid_points],
sizes[valid_points],
colors[valid_points],
markers[valid_points],
legend_labels[valid_points])
# def get_ax_size(ax, fig):
# bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# return bbox.width, bbox.height
def pix2inch(pix, fig):
"""Converts pixel to inches on a given matplotlib figure"""
return pix / fig.dpi
def inch2pix(inch, fig):
"""Converts inches to pixel on a given matplotlib figure"""
return inch * fig.dpi
def _joinargs(key_prefix, kwargs, **already_supplied_args):
'''updates already_supplied_args with kwargs using a given prefix in kwargs to identify
common keys. Used in plotmap for kwargs'''
key_prefix += "_"
len_prefix = len(key_prefix)
already_supplied_args.update({k[len_prefix:]: v
for k, v in kwargs.iteritems() if k.startswith(key_prefix)})
return already_supplied_args
def _mp_set_custom_props(drawfunc_retval, lines_props, labels_props):
"""Sets custom properties on drawparallels or drawmeridians return function.
drawfunc_retval is a dict of numbers mapped to tuples where the first element is a list of
matplotlib lines, and the second element is a list of matplotlib texts"""
_setprop(chain.from_iterable((lin for lin, lab in drawfunc_retval.itervalues())), lines_props)
_setprop(chain.from_iterable((lab for lin, lab in drawfunc_retval.itervalues())), labels_props)
def _setprop(iterator_of_mp_objects, props):
'''sets the given properties of an iterator of same type matplotlib objects'''
if not props:
return
prp = {}
for obj in iterator_of_mp_objects:
if not prp:
prp = {"set_%s" % name: val for name, val in props.iteritems()
if hasattr(obj, "set_%s" % name)}
for name, val in prp.iteritems():
getattr(obj, name)(val)
# values below CAN be None but CANNOT be arrays containing None's
def plotmap(lons,
lats,
labels=None,
legendlabels=None,
markers="o",
colors="#FF4400",
sizes=20,
cmap=None,
fontsize=None,
fontweight='regular',
fontcolor='k',
labels_h_offset=0,
labels_v_offset=0,
mapmargins='0.5deg',
figmargins=2,
arcgis_service='World_Street_Map',
arcgis_xpixels=1500,
arcgis_dpi=96,
urlfail='ignore',
maxmeridians=5,
maxparallels=5,
legend_pos='bottom',
legend_borderaxespad=1.5,
legend_ncol=1,
title=None,
show=False,
**kwargs): # @UnusedVariable
"""
Makes a scatter plot of points on a map background using ArcGIS REST API.
:param lons: (array-like of length N or scalar) Longitudes of the data points, in degrees
:param lats: (array-like of length N or scalar) Latitudes of the data points, in degrees
:param labels: (array-like of length N or string. Default: None, no labels) Annotations
(labels) for the individual data points on the map. If non-array (e.g. string), the same value
will be applied to all points
:param legendlabels: (array-like of length N or string. Default: None, no legend)
Annotations (labels) for the legend. You can supply a sparse array where only some points
have a legend label. Points with no legend label will not show up in the
legend
:param sizes: (array-like of length N or number. Default: 20) Sizes (in points^2) of the
individual points in the scatter plot.
:param markers: (array-like of length N,
`MarkerStyle <http://matplotlib.org/api/markers_api.html#matplotlib.markers.MarkerStyle>`_ or
string. Default: 'o' - circle) The markers (shapes) to be drawn for each point on the map.
See `markers <http://matplotlib.org/api/markers_api.html#module-matplotlib.markers>`_ for
more information on the different styles of markers scatter supports. Marker can be either
an instance of the class or the text shorthand for a particular marker.
:param colors: (array-like of length N,
`matplotlib color <http://matplotlib.org/api/colors_api.html>`_, e.g. string.
Default: "#FF4400")
Colors for the markers (fill color). You can specify color transparency by supplying a string of 9
elements where the last two characters denote the transparency ('00' fully transparent,
'ff' fully opaque). Note that this is a feature not implemented in `matplotlib` colors, where
transparency is given as the last element of the numeric tuple (r, g, b, a)
:param fontsize: (numeric or None. Default: None) The fontsize for all texts drawn on the
map (labels, axis tick labels, legend). None uses the default figure font size for all. Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontweight: (string or number. Default: 'regular') The font weight for all texts drawn
on the map (labels, axis tick labels, legend). Accepts the values (see
http://matplotlib.org/api/text_api.html#matplotlib.text.Text.set_weight):
```
[a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' |
'bold' | 'heavy' | 'extra bold' | 'black' ]
```
Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontcolor: (`matplotlib color <http://matplotlib.org/api/colors_api.html>`_ or
string. Default: 'k', black) The font color for all texts drawn on the
map (labels, axis tick labels, legend). Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param labels_h_offset: (string, number. Defaults None=0) The horizontal offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels westward, positive values eastward. Useful for not overlapping
markers and labels.
If numeric, it is assumed to be expressed in degrees. Otherwise, you can supply a string
with a number followed by one of the units 'm', 'km' or 'deg' (e.g., '5km', '0.5deg').
Note that this value affects the
`horizontalalignment` and `multialignment` properties of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_horizontalalignment` or `labels_ha` as optional argument will override
this behaviour (see `kwargs` below)
:param labels_v_offset: (string, number. Defaults None=0) The vertical offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels southward, positive values northward. See notes on `labels_h_offset` for details.
Note that this value affects the
`verticalalignment` property of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_verticalalignment` or `labels_va` as optional argument will override
this behaviour (see `kwargs` below)
:param mapmargins: (array-like of 1,2,3,4 elements, numeric or string, or None=0.
Default: '0.5deg').
The map margins, i.e. how much the map has to 'expand/shrink' in any direction, relative
to the bounding box calculated to include all points.
If array-like, it behaves like the css 'margin' property of html: 4 elements will denote
[top, right, bottom, left], two elements will denote [top/bottom, left/right], three
elements [top, right/left, bottom], a single element array (or a single number or a string)
applies the value to all directions.
Finally, elements of the array must be expressed as the arguments `labels_h_offset` or
`labels_v_offset`: numbers denoting degrees or strings with units 'm', 'km', 'deg'. Negative
values will shrink the map.
If string, the argument will first be split using commas, semicolons or spaces as delimiters
(if no delimiter is found, the string is taken as a single chunk) and converted to an array-like
object.
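For example (hypothetical value), `mapmargins='1deg, 0.5deg'` expands the bounding box by
1 degree above and below and by 0.5 degrees left and right.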
:param figmargins: (array-like of 1,2,3,4 elements, number or None=0. Default:2) The
figure margins *in font height units* (e.g., 2 means: twice the font height). This argument
behaves exactly as `mapmargins` but expands/shrinks the distances between map and figure
(image) bounds. Useful to include axis tick labels or legend, if they overflow.
Note also that strings
are allowed only if they are parsable to float (e.g. "5,6; -12 1")
:param arcgis_service: (string, default: 'World_Street_Map'). The map image type, or
more technically the service for the map
hosted on ArcGIS server. Other values are 'ESRI_Imagery_World_2D'
(default in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_),
'World_Topo_Map', 'World_Terrain_Base'. For details, see:
http://server.arcgisonline.com/arcgis/rest/services.
:param arcgis_xpixels: (numeric, default: 1500). Requested number of image pixels
in x-direction (default is 400 in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_).
The documentation is quite unclear but this parameter seems to set the zoom of the image. From
this `link <http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage>`_:
A bigger number requests a bigger image, so the image will have more detail. Thus, when
the zoom is bigger, `xsize` must be bigger to maintain the resolution
:param urlfail: (string, 'raise' or 'ignore'. Default: 'ignore'). Tells what to do if the
ArcGIS request fails (URLError, no internet connection, etcetera). By default, on failure a plain
map with continent contours and oceans will be plotted (good for
debug). Otherwise, the exception resulting from the web request is raised
:param maxmeridians: (numeric default: 5). The number of maximum meridians to be drawn. Set to
<=0 to hide meridians. Note that x-axis labels are also drawn.
To further manipulate meridians display, use any argument starting with
'mlabels_', 'mlines_' or 'meridians' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `meridians_linewidth=0` or 'mlines_linewidth=0'.
:param maxparallels: (numeric default: 5). The number of maximum parallels to be drawn. Set to
<=0 to hide parallels. Note that y-axis labels are also drawn.
To further manipulate parallels display, use any argument starting with
'plabels_', 'plines_' or 'parallels' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `parallels_linewidth=0` or 'plines_linewidth=0'.
:param legend_pos: (string in ['top', 'bottom', 'right', 'left'], default='bottom'). The legend
location with respect to the map. It also adjusts the bounding box that the legend will be
anchored to.
To fully
customize the legend placement, overriding this parameter, provide `legend_loc`
(and optionally `legend_bbox_to_anchor`) in `kwargs` (see below)
:param legend_borderaxespad: (numeric, default 1.5) The pad between the axes and legend border,
in font units
:param legend_ncol: (integer, default=1) The legend number of columns
:param title: (string or None. Default: None) Title above the plot (Note: not tested)
:param show: (boolean, default: False) Whether to show the figure after plotting or not
(Note: not tested). If False, the figure can be further customized before showing it.
:param fig: (matplotlib figure or None, default: None). Note: deprecated, pass None as
supplying an already existing figure with other axes might break the figure layout
:param kwargs: any kind of additional argument passed to `matplotlib` and `Basemap` functions
or objects.
The name of the argument must be of the form
```
prefix_propertyname=propertyvalue
```
where prefix indicates the function/object to be called with keyword argument:
```
propertyname=propertyvalue
```
Current supported prefixes are (for available property names see links):
Prefix Passes `propertyname` to
============ ==================================================================================
arcgis `Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_
used to retrieve the background map using the ArcGIS Server REST API. See also
http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage
basemap `Basemap <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap>`_
the object responsible for drawing and managing the map. Note that
`basemap_resolution='i'` and `basemap_epsg='4326'` by default.
labels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the point labels on the map
legend The `legend <http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_.
See the already implemented arguments `legend_borderaxespad`,
`legend_ncol`
legendlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the text labels of the legend
meridians `Basemap.drawmeridians`. For more detailed settings on meridians, see
`mlines` and `mlabels`
parallels `Basemap.drawparallels`. For more detailed settings on parallels, see
`plines` and `plabels`
plines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the parallels
plabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the parallels labels on the y axis
mlines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the meridians
mlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the meridians labels on the x axis
============ ==================================================================================
Examples
--------
- `legend_title='abc'` will call `legend(..., title='abc', ...)`
- `labels_path_effects=[PathEffects.withStroke(linewidth=2, foreground='white')]` will set
a white contour around each label text
- `meridians_labelstyle="+/-"` will call `Basemap.drawmeridians(..., labelstyle="+/-", ...)`
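- A minimal hypothetical call (`my_lons` and `my_lats` are assumed equal-length arrays):
`plotmap(my_lons, my_lats, labels='sta', sizes=50, legend_pos='right')`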
Notes:
------
The objects referenced by `plines`, `plabels`, `mlines`, `mlabels` and `legendlabels`
cannot be initialized directly with the given properties, which will be set after they are
created, assuming that for any property `foo` passed as keyword argument in their constructor
there exists a method `set_foo(...)` (which will be called with the given propertyvalue).
This is most likely always true according to the matplotlib API, but we cannot guarantee it
works 100% of the time
"""
lons, lats, labels, sizes, colors, markers, legendlabels =\
_shapeargs(lons, lats, labels, sizes, colors, markers, legendlabels)
# convert html strings to tuples of rgba values in [0, 1] if the former are in string format,
# because (maybe too old matplotlib version?) colors in the format '#RGBA' are not supported.
# Also, if cmap is provided, basemap.scatter calls matplotlib.scatter which
# wants float sequences in case of a color map
if colors.dtype.char in ('U', 'S'): # pylint: disable=no-member
colors = np.array([torgba(c) for c in colors])
fig = plt.figure()
map_ax = fig.add_axes([0, 0, 1, 1]) # set axes size the same as figure
# setup handler for managing basemap coordinates and meridians / parallels calculation:
handler = MapHandler(lons, lats, mapmargins)
kwa = _joinargs('basemap', kwargs,
llcrnrlon=handler.llcrnrlon,
llcrnrlat=handler.llcrnrlat,
urcrnrlon=handler.urcrnrlon,
urcrnrlat=handler.urcrnrlat,
epsg='4326', # 4326, # 3395, # 3857,
resolution='i', # 'h',
ax=map_ax)
bmap = Basemap(**kwa)
try:
kwa = _joinargs("arcgis", kwargs, service=arcgis_service, xpixels=arcgis_xpixels,
dpi=arcgis_dpi)
# set the map image via a map service. In case you need the returned value, note that
# this function returns an AxesImage (check the matplotlib doc)
bmap.arcgisimage(**kwa)
except (URLError, HTTPError, socket.error) as exc:
# failed, maybe there is no internet connection
if urlfail == 'ignore':
# Print a simple map offline
bmap.drawcoastlines()
watercolor = '#4444bb'
bmap.fillcontinents(color='#eebb66', lake_color=watercolor)
bmap.drawmapboundary(fill_color=watercolor)
else:
raise
# draw meridians and parallels. From basemap.drawmeridians / drawparallels doc:
# returns a dictionary whose keys are the meridian values, and
# whose values are tuples containing lists of the
# matplotlib.lines.Line2D and matplotlib.text.Text instances
# associated with each meridian. Deleting an item from the
# dictionary removes the corresponding meridian from the plot.
if maxparallels > 0:
kwa = _joinargs("parallels", kwargs, linewidth=1, fontsize=fontsize,
labels=[0, 1, 1, 0], fontweight=fontweight)
parallels = handler.get_parallels(maxparallels)
# Old basemap versions have problems with non-integer parallels.
try:
# Note: the method below returns a dict whose values contain the text
# objects representing the tick labels (see the comment above)
_dict = bmap.drawparallels(parallels, **kwa)
except KeyError:
parallels = sorted(list(set(map(int, parallels))))
_dict = bmap.drawparallels(parallels, **kwa)
# set custom properties:
kwa_lines = _joinargs("plines", kwargs)
kwa_labels = _joinargs("plabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
if maxmeridians > 0:
kwa = _joinargs("meridians", kwargs, linewidth=1, fontsize=fontsize,
labels=[1, 0, 0, 1], fontweight=fontweight)
meridians = handler.get_meridians(maxmeridians)
_dict = bmap.drawmeridians(meridians, **kwa)
# set custom properties:
kwa_lines = _joinargs("mlines", kwargs)
kwa_labels = _joinargs("mlabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
# fig.get_axes()[0].tick_params(direction='out', length=15) # does not work, check basemap
fig.bmap = bmap
# compute the native bmap projection coordinates for events.
# from the docs (possibly outdated, but kept here for the moment):
# Calling a Basemap class instance with the arguments lon, lat will
# convert lon/lat (in degrees) to x/y map projection
# coordinates (in meters). If optional keyword ``inverse`` is
# True (default is False), the inverse transformation from x/y
# to lon/lat is performed.
# For cylindrical equidistant projection (``cyl``), this
# does nothing (i.e. x,y == lon,lat).
# For non-cylindrical projections, the inverse transformation
# always returns longitudes between -180 and 180 degrees. For
# cylindrical projections (self.projection == ``cyl``,
# ``cea``, ``mill``, ``gall`` or ``merc``)
# the inverse transformation will return longitudes between
# self.llcrnrlon and self.llcrnrlat.
# Input arguments lon, lat can be either scalar floats,
# sequences, or numpy arrays.
# parse hoffset and voffset and assure they are at least arrays of 1 elements
# (for aligning text labels, see below)
hoffset = np.array(parse_distance(labels_h_offset, lats), copy=False, ndmin=1)
voffset = np.array(parse_distance(labels_v_offset), copy=False, ndmin=1)
lbl_lons = lons + hoffset
lbl_lats = lats + voffset
# convert labels coordinates:
xlbl, ylbl = bmap(lbl_lons, lbl_lats)
# plot point labels
max_points = -1 # negative means: plot all
if max_points < 0 or len(lons) < max_points:
# Set alignments which control also the corner point reference when placing labels
# from (FIXME: add ref?)
# horizontalalignment controls whether the x positional argument for the text indicates
# the left, center or right side of the text bounding box.
# verticalalignment controls whether the y positional argument for the text indicates
# the bottom, center or top side of the text bounding box.
# multialignment, for newline separated strings only, controls whether the different lines
# are left, center or right justified
ha = 'left' if hoffset[0] > 0 else 'right' if hoffset[0] < 0 else 'center'
va = 'bottom' if voffset[0] > 0 else 'top' if voffset[0] < 0 else 'center'
ma = ha
kwa = _joinargs("labels", kwargs, fontweight=fontweight, color=fontcolor,
zorder=100, fontsize=fontsize, horizontalalignment=ha,
verticalalignment=va, multialignment=ma)
for name, xpt, ypt in zip(labels, xlbl, ylbl):
# Check if the point can actually be seen with the current bmap
# projection. The bmap object will set the coordinates to very
# large values if it cannot project a point.
if xpt > 1e25:
continue
map_ax.text(xpt, ypt, name, **kwa)
# plot points
x, y = bmap(lons, lats)
# store handles to points, and relative labels, if any
leg_handles, leg_labels = [], []
# bmap.scatter accepts all array-like args except markers. Avoid several useless loops
# and do only those for distinct markers:
# unique markers (sorted according to their index in markers, not their value):
mrks = markers[np.sort(np.unique(markers, return_index=True)[1])]
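# Illustrative example (made-up values): for markers = np.array(['s', 'o', 's', '^']),
# np.unique(..., return_index=True)[1] is [3, 1, 0] (first occurrence of each sorted
# unique value); after np.sort we index with [0, 1, 3] and obtain ['s', 'o', '^'],
# i.e. the distinct markers in order of first appearance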
for mrk in mrks:
# Note using masks with '==' (numpy==1.11.3):
#
# >>> a = np.array([1,2,3])
# >>> a == 3
# array([False, False, True], dtype=bool) # OK
# >>> a == None
# False # NOT AS EXPECTED!
# >>> np.equal(a, None)
# array([False, False, False], dtype=bool) # OK
#
# (Note also that a == None issues:
# FutureWarning: comparison to `None` will result in an elementwise object
# comparison in the future.)
#
# So the correct way is to write
# mask = np.equal(array, val) if val is None else (a == val)
m_mask = np.equal(markers, mrk) if mrk is None else markers == mrk # see above
__x = x[m_mask]
__y = y[m_mask]
__m = mrk
__s = sizes[m_mask]
__c = colors[m_mask]
__l = legendlabels[m_mask]
# unique legends (sorted according to their index in __l, not their value):
for leg in __l[np.sort(np.unique(__l, return_index=True)[1])]:
l_mask = np.equal(__l, leg) if leg is None else __l == leg # see above
_scatter = bmap.scatter(__x[l_mask],
__y[l_mask],
marker=mrk,
s=__s[l_mask],
c=__c[l_mask],
cmap=cmap,
zorder=10)
if leg:
leg_handles.append(_scatter)
leg_labels.append(leg)
if leg_handles:
# if we provided `legend_loc`, use that:
loc = kwargs.get('legend_loc', None)
bbox_to_anchor = None # defaults in matplotlib legend
# we do have legend to show. Adjust legend reference corner:
if loc is None:
if legend_pos == 'bottom':
loc = 'upper center'
bbox_to_anchor = (0.5, -0.05)
elif legend_pos == 'top':
loc = 'lower center'
bbox_to_anchor = (0.5, 1.05)
elif legend_pos == 'left':
loc = 'center right'
bbox_to_anchor = (-0.05, 0.5)
elif legend_pos == 'right':
loc = 'center left'
bbox_to_anchor = (1, 0.5)
else:
raise ValueError('invalid legend_pos value:"%s"' % legend_pos)
# The plt.legend has the prop argument which sets the font properties:
# family, style, variant, weight, stretch, size, fname. See
# http://matplotlib.org/api/font_manager_api.html#matplotlib.font_manager.FontProperties
# However, that property does not allow setting the font color. So we
# use the get_texts method of Legend. Note that we pass the font size *now* even if
# setting it later works as well (the legend frame is resized accordingly)
kwa = _joinargs("legend", kwargs, scatterpoints=1, ncol=legend_ncol, loc=loc,
bbox_to_anchor=bbox_to_anchor, borderaxespad=legend_borderaxespad,
fontsize=fontsize)
# http://stackoverflow.com/questions/17411940/matplotlib-scatter-plot-legend
leg = map_ax.legend(leg_handles, leg_labels, **kwa)
# set properties supplied via 'legend_'
_setprop(leg.get_texts(), _joinargs("legendlabels", kwargs, color=fontcolor))
# re-position the axes. The REAL map aspect ratio seems to be this:
realratio_h_w = bmap.aspect
fig_w, fig_h = fig.get_size_inches()
figratio_h_w = np.true_divide(fig_h, fig_w)
if figratio_h_w >= realratio_h_w:
# we have margins (blank space) above and below
# thus, we assume:
map_w = fig_w
# and we calculate map_h
map_h = map_w * realratio_h_w
# assume there is the same amount of space above and below:
vpad = (fig_h - map_h) / 2.0
# hpad is zero:
hpad = 0
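# Worked example (made-up numbers): for an 8 x 6 inch figure and a map with
# aspect ratio h/w = 0.5 we get figratio_h_w = 0.75 >= 0.5, hence map_w = 8,
# map_h = 8 * 0.5 = 4 and vpad = (6 - 4) / 2 = 1 inch above and below (hpad = 0)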
else:
# we have margins (blank space) left and right
# thus, we assume:
map_h = fig_h
# and consequently:
map_w = map_h / realratio_h_w
# assume there is the same amount of space left and right:
hpad = (fig_w - map_w) / 2.0
# vpad is zero:
vpad = 0
# calculate new fig dimensions EXACTLY as contour of the map
new_fig_w = fig_w - 2 * hpad
new_fig_h = fig_h - 2 * vpad
# now margins:
marginz = parse_margins(figmargins) # margins are in fontheight units. Get font height:
fontsize_inch = 0
if len(np.nonzero(marginz)[0]):
# Calculate the font size in pixels.
# We want to be consistent with matplotlib way of getting fontsize.
# inspecting matplotlib.legend.Legend.draw we end up with:
# 1. Get the renderer
rend = fig.canvas.get_renderer()
# 2. get the fontsize in points. We might use `fontsize` but it might be None and we want
# the default in case. There are several 'defaults' (rcParams['font.size'],
# rcParams["legend.fontsize"])... we don't care for now, use the first. How to get
# rcParams['font.size'] ? Either this: (see at matplotlib.Legend.__init__):
# fontsize_pt = FontProperties(size=fontsize, weight=fontweight).get_size_in_points()
# or simply do:
fontsize_pt = fontsize or rcParams['font.size']
# Now use renderer to convert to pixels:
# For info see matplotlib.text.Text.get_window_extent
fontsize_px = rend.points_to_pixels(fontsize_pt)
# finally inches:
fontsize_inch = pix2inch(fontsize_px, fig)
# calculate insets in inches (top right bottom left)
insets_inch = marginz * fontsize_inch
# set to fig dimensions
new_fig_w += insets_inch[1] + insets_inch[3]
new_fig_h += insets_inch[0] + insets_inch[2]
fig.set_size_inches(new_fig_w, new_fig_h, forward=True)
# (forward necessary if fig is in GUI, let's set for safety)
# now the axes which are relative to the figure. Thus first normalize inches:
insets_inch /= [fig_h, fig_w, fig_h, fig_w]
# pos1 = map_ax.get_position() # get the original position
# NOTE: it seems that pos[0], pos[1] indicate the x and y of the LOWER LEFT corner, not
# upper left!
pos2 = [insets_inch[3], insets_inch[2],
1 - (insets_inch[1] + insets_inch[3]),
1 - (insets_inch[0] + insets_inch[2])]
map_ax.set_position(pos2)
if title:
plt.suptitle(title)
if show:
plt.show()
return fig
| gpl-3.0 |
wwang2/GaussianBeamVI | GaussianVi.py | 1 | 6426 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
from matplotlib import text
# Define initial setuplength and plot resolution
length = 800.0
res = 0.1
# Define plots and range
fig, ax = plt.subplots()
fig.canvas.set_window_title('GaussianBeamVI - copyright @ [email protected]')
plt.subplots_adjust(bottom=0.50)
# beam waist function
def beam(w,lamda,start,stop):
x = np.arange(start, stop, res)
return w*(1+ (lamda*(x-start)/(np.pi*w**2))**2)**0.5
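# The expression above is the standard Gaussian beam radius
# w(z) = w0 * sqrt(1 + ((z - z0) / zR)**2) with Rayleigh range zR = pi * w0**2 / lamda,
# written with zR expanded; `start` plays the role of the waist position z0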
# Back-traced beam radius between the lens and the image waist located at `stop`
def backbeam(m,w,lamda,start,stop):
zr = np.pi*(w*m)**2/lamda
x = np.arange(start, stop, res)
return m*w*(1+ ((stop-x)/zr)**2)**0.5
# Compute the image waist position (distance from the lens)
def imageposition(w,lamda,f,s):
zr = np.pi*w**2/lamda
return 1/(1/f - 1/(s+zr**2/(s-f)))
# Compute the magnification
def mag(w,lamda,f,s):
zr = np.pi*w**2/lamda
return 1/(((1-s/f)**2+(zr/f)**2)**0.5)
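# The two helpers above compute the standard Gaussian-beam thin-lens (waist imaging)
# relations, often attributed to Self's 1983 paper on focusing Gaussian beams:
# with zr = pi*w**2/lamda and object (waist) distance s, the image waist sits at
# s' = 1 / (1/f - 1/(s + zr**2/(s - f))) and the waist magnification is
# m = 1 / sqrt((1 - s/f)**2 + (zr/f)**2), so the image waist is w_image = m * w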
# Reset
def reset(event):
spos1.reset()
sf1.reset()
sw0.reset()
# initial beam and lens position
lamda0 = 782e-6
pos1ini = 51
f1ini = 50.0
wini = 0.5
m1ini = mag(wini,lamda0,f1ini,pos1ini)
# 2nd lens initial parameter
f2ini = 50.0
pos2ini = 151.0
# Compute Initial magnification and beamwaist position
im1posini = imageposition(wini,lamda0,f1ini,pos1ini)
im2posini = imageposition(wini*m1ini,lamda0,f2ini,pos2ini-im1posini-pos1ini)
w1ini = m1ini*wini
m2ini = mag(w1ini,lamda0,f2ini,pos2ini-im1posini-pos1ini)
w2ini = m2ini*w1ini
# Initialize Plots with initial parameters
beam1down ,= plt.plot(np.arange(0,pos1ini,res),-beam(wini,lamda0,0.0,pos1ini),color = "Blue")
beam1up ,= plt.plot(np.arange(0,pos1ini,res),beam(wini,lamda0,0.0,pos1ini),color = "Blue")
beam2down ,= plt.plot(np.arange(pos1ini+im1posini,pos2ini,res),
-beam(w1ini,lamda0,pos1ini+im1posini,pos2ini),color = "Blue")
beam2up ,= plt.plot(np.arange(pos1ini+im1posini,pos2ini,res),
beam(w1ini,lamda0,pos1ini+im1posini,pos2ini),color = "Blue")
bbeam1down ,= plt.plot(np.arange(pos1ini,pos1ini+im1posini,res),
-backbeam(m1ini,wini,lamda0,pos1ini,pos1ini+im1posini),color = "Blue")
bbeam1up ,= plt.plot(np.arange(pos1ini,pos1ini+im1posini,res),
backbeam(m1ini,wini,lamda0,pos1ini,pos1ini+im1posini),color = "Blue")
beam3down ,= plt.plot(np.arange(pos2ini+im2posini,length,res),
-beam(w2ini,lamda0,pos2ini+im2posini,length),color = "Blue")
beam3up ,= plt.plot(np.arange(pos2ini+im2posini,length,res),
beam(w2ini,lamda0,pos2ini+im2posini,length),color = "Blue")
bbeam2down ,= plt.plot(np.arange(pos2ini,pos2ini+im2posini,res),
-backbeam(m2ini,w1ini,lamda0,pos2ini,pos2ini+im2posini),color = "Blue")
bbeam2up ,= plt.plot(np.arange(pos2ini,pos2ini+im2posini,res),
backbeam(m2ini,w1ini,lamda0,pos2ini,pos2ini+im2posini),color = "Blue")
im1 ,= plt.plot([pos1ini+im1posini,pos1ini+im1posini], [-w1ini,w1ini])
im2 ,= plt.plot([pos2ini+im2posini,pos2ini+im2posini], [-w2ini,w2ini])
lens1 ,= plt.plot([pos1ini,pos1ini],[-2,2])
lens2 ,= plt.plot([pos2ini,pos2ini],[-2,2])
plt.axis([0, length, -2, 2])
ax.xaxis.set_ticks(np.arange(0, length, 50))
ax.yaxis.set_ticks(np.arange(-2.5, 2.5, 0.25))
ax.tick_params(labeltop=True, labelright=True)
axcolor = 'lightgoldenrodyellow'
# Define wavelength Slider
axlamda = plt.axes([0.25, 0.35, 0.65, 0.03], axisbg=axcolor)
slamda = Slider(axlamda, 'wavelength', 200, 1200, valinit=782)
# Define lens position slider
axpos1 = plt.axes([0.25, 0.25, 0.65, 0.03], axisbg=axcolor)
axpos2 = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
spos1 = Slider(axpos1, 'position1', 0.0, length, valinit=pos1ini)
spos2 = Slider(axpos2, 'position2', 0.0, length, valinit=pos2ini)
# Define initial beam waist slider
axw0 = plt.axes([0.25, 0.3, 0.65, 0.03], axisbg=axcolor)
sw0 = Slider(axw0, 'beam waist', 0.0, 2.0, valinit=wini)
# Define lens1 focus slider
axf1 = plt.axes([0.25, 0.2, 0.65, 0.03], axisbg=axcolor)
sf1 = Slider(axf1, 'lens 1 focus', 0.0, 300, valinit=f1ini)
# Define lens2 focus slider
axf2 = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
sf2 = Slider(axf2, 'lens 2 focus', 0.0, 300, valinit=f2ini)
# Update plots in response to sliders
def update(val):
lamda = slamda.val* 0.000001
print lamda
pos1 = spos1.val
pos2 = spos2.val
w0 = sw0.val
f1 = sf1.val
f2 = sf2.val
m1 = mag(w0,lamda,f1,pos1)
im1pos = imageposition(w0,lamda,f1,pos1)
w1 = m1*w0
m2 = mag(w1,lamda,f2,pos2-im1pos-pos1)
im2pos = imageposition(w1,lamda,f2,pos2-im1pos-pos1)
w2 = m2*w1
print w2
lens1.set_data([pos1, pos1], [-2,2])
lens2.set_data([pos2, pos2], [-2,2])
beam1up.set_data(np.arange(0.0, pos1,res),np.array(beam(w0,lamda,0.0,pos1)))
bbeam1up.set_data(np.arange(pos1,pos1+im1pos,res),np.array(backbeam(m1,w0,lamda,pos1,pos1+im1pos)))
beam1down.set_data(np.arange(0.0, pos1,res),-np.array(beam(w0,lamda,0.0,pos1)))
bbeam1down.set_data(np.arange(pos1,pos1+im1pos,res),-np.array(backbeam(m1,w0,lamda,pos1,pos1+im1pos)))
beam2up.set_data(np.arange(pos1+im1pos,pos2,res),np.array(beam(w1,lamda,pos1+im1pos,pos2)))
beam2down.set_data(np.arange(pos1+im1pos,pos2,res),-np.array(beam(w1,lamda,pos1+im1pos,pos2)))
beam3up.set_data(np.arange(pos2+im2pos,length,res),np.array(beam(w2,lamda,pos2+im2pos,length)))
beam3down.set_data(np.arange(pos2+im2pos,length,res),-np.array(beam(w2,lamda,pos2+im2pos,length)))
bbeam2up.set_data(np.arange(pos2,pos2+im2pos,res),
backbeam(m2,w1,lamda,pos2,pos2+im2pos))
bbeam2down.set_data(np.arange(pos2,pos2+im2pos,res),
-backbeam(m2,w1,lamda,pos2,pos2+im2pos))
im1.set_data([im1pos+pos1,im1pos+pos1],[-w1,w1])
im2.set_data([pos2+im2pos,pos2+im2pos], [-w2,w2])
fig.canvas.draw_idle()
# Call the update function once slider changes
sw0.on_changed(update)
spos1.on_changed(update)
sf1.on_changed(update)
spos2.on_changed(update)
sf2.on_changed(update)
slamda.on_changed(update)
# Define Reset Button
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
button.on_clicked(reset)
plt.show()
| gpl-3.0 |
xray/xray | xarray/tests/test_concat.py | 1 | 21946 | from copy import deepcopy
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable, concat
from xarray.core import dtypes, merge
from . import (
InaccessibleArray,
assert_array_equal,
assert_equal,
assert_identical,
raises_regex,
requires_dask,
)
from .test_dataset import create_test_data
def test_concat_compat():
ds1 = Dataset(
{
"has_x_y": (("y", "x"), [[1, 2]]),
"has_x": ("x", [1, 2]),
"no_x_y": ("z", [1, 2]),
},
coords={"x": [0, 1], "y": [0], "z": [-1, -2]},
)
ds2 = Dataset(
{
"has_x_y": (("y", "x"), [[3, 4]]),
"has_x": ("x", [1, 2]),
"no_x_y": (("q", "z"), [[1, 2]]),
},
coords={"x": [0, 1], "y": [1], "z": [-1, -2], "q": [0]},
)
result = concat([ds1, ds2], dim="y", data_vars="minimal", compat="broadcast_equals")
assert_equal(ds2.no_x_y, result.no_x_y.transpose())
for var in ["has_x", "no_x_y"]:
assert "y" not in result[var].dims and "y" not in result[var].coords
with raises_regex(ValueError, "coordinates in some datasets but not others"):
concat([ds1, ds2], dim="q")
with raises_regex(ValueError, "'q' is not present in all datasets"):
concat([ds2, ds1], dim="q")
class TestConcatDataset:
@pytest.fixture
def data(self):
return create_test_data().drop_dims("dim3")
def rectify_dim_order(self, data, dataset):
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(
{k: v.transpose(*data[k].dims) for k, v in dataset.data_vars.items()},
dataset.coords,
attrs=dataset.attrs,
)
@pytest.mark.parametrize("coords", ["different", "minimal"])
@pytest.mark.parametrize("dim", ["dim1", "dim2"])
def test_concat_simple(self, data, dim, coords):
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
assert_identical(data, concat(datasets, dim, coords=coords))
def test_concat_merge_variables_present_in_some_datasets(self, data):
# coordinates present in some datasets but not others
ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1})
ds2 = Dataset(data_vars={"a": ("y", [0.2])}, coords={"z": 0.2})
actual = concat([ds1, ds2], dim="y", coords="minimal")
expected = Dataset({"a": ("y", [0.1, 0.2])}, coords={"x": 0.1, "z": 0.2})
assert_identical(expected, actual)
# data variables present in some datasets but not others
split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
data0, data1 = deepcopy(split_data)
data1["foo"] = ("bar", np.random.randn(10))
actual = concat([data0, data1], "dim1")
expected = data.copy().assign(foo=data1.foo)
assert_identical(expected, actual)
def test_concat_2(self, data):
dim = "dim2"
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in data.coords.items() if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
assert_identical(data, self.rectify_dim_order(data, actual))
@pytest.mark.parametrize("coords", ["different", "minimal", "all"])
@pytest.mark.parametrize("dim", ["dim1", "dim2"])
def test_concat_coords_kwarg(self, data, dim, coords):
data = data.copy(deep=True)
# make sure the coords argument behaves as expected
data.coords["extra"] = ("dim4", np.arange(3))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords=coords)
if coords == "all":
expected = np.array([data["extra"].values for _ in range(data.dims[dim])])
assert_array_equal(actual["extra"].values, expected)
else:
assert_equal(data["extra"], actual["extra"])
def test_concat(self, data):
split_data = [
data.isel(dim1=slice(3)),
data.isel(dim1=3),
data.isel(dim1=slice(4, None)),
]
assert_identical(data, concat(split_data, "dim1"))
def test_concat_dim_precedence(self, data):
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data["dim1"]).rename("dim1")
datasets = [g for _, g in data.groupby("dim1", squeeze=False)]
expected = data.copy()
expected["dim1"] = dim
assert_identical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({"foo": ("x", np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ["minimal", "different", "all", [], ["foo"]]:
actual = concat(objs, dim="x", data_vars=data_vars)
assert_identical(data, actual)
def test_concat_coords(self):
data = Dataset({"foo": ("x", np.random.randn(10))})
expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5))
objs = [
data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1),
]
for coords in ["different", "all", ["c"]]:
actual = concat(objs, dim="x", coords=coords)
assert_identical(expected, actual)
for coords in ["minimal", []]:
with raises_regex(merge.MergeError, "conflicting values"):
concat(objs, dim="x", coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({"foo": 1.5}, {"y": 1})
ds2 = Dataset({"foo": 2.5}, {"y": 1})
expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]})
for mode in ["different", "all", ["foo"]]:
actual = concat([ds1, ds2], "y", data_vars=mode)
assert_identical(expected, actual)
with raises_regex(merge.MergeError, "conflicting values"):
# previously dim="y", and raised error which makes no sense.
# "foo" has dimension "y" so minimal should concatenate it?
concat([ds1, ds2], "new_dim", data_vars="minimal")
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, "dim1")
assert_identical(data, actual)
actual = concat(split_data[::-1], "dim1")
assert_identical(data, actual)
def test_concat_autoalign(self):
ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])})
actual = concat([ds1, ds2], "y")
expected = Dataset(
{
"foo": DataArray(
[[1, 2, np.nan], [1, np.nan, 2]],
dims=["y", "x"],
coords={"x": [1, 2, 3]},
)
}
)
assert_identical(expected, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
with raises_regex(ValueError, "must supply at least one"):
concat([], "dim1")
with raises_regex(ValueError, "Cannot specify both .*='different'"):
concat(
[data, data], dim="concat_dim", data_vars="different", compat="override"
)
with raises_regex(ValueError, "must supply at least one"):
concat([], "dim1")
with raises_regex(ValueError, "are not coordinates"):
concat([data, data], "new_dim", coords=["not_found"])
with raises_regex(ValueError, "global attributes not"):
data0, data1 = deepcopy(split_data)
data1.attrs["foo"] = "bar"
concat([data0, data1], "dim1", compat="identical")
assert_identical(data, concat([data0, data1], "dim1", compat="equals"))
with raises_regex(ValueError, "compat.* invalid"):
concat(split_data, "dim1", compat="foobar")
with raises_regex(ValueError, "unexpected value for"):
concat([data, data], "new_dim", coords="foobar")
with raises_regex(ValueError, "coordinate in some datasets but not others"):
concat([Dataset({"x": 0}), Dataset({"x": [1]})], dim="z")
with raises_regex(ValueError, "coordinate in some datasets but not others"):
concat([Dataset({"x": 0}), Dataset({}, {"x": 1})], dim="z")
def test_concat_join_kwarg(self):
ds1 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]})
ds2 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]})
expected = {}
expected["outer"] = Dataset(
{"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])},
{"x": [0, 1], "y": [0, 0.0001]},
)
expected["inner"] = Dataset(
{"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []}
)
expected["left"] = Dataset(
{"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
expected["right"] = Dataset(
{"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0.0001]},
)
expected["override"] = Dataset(
{"a": (("x", "y"), np.array([0, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
with raises_regex(ValueError, "indexes along dimension 'y'"):
actual = concat([ds1, ds2], join="exact", dim="x")
for join in expected:
actual = concat([ds1, ds2], join=join, dim="x")
assert_equal(actual, expected[join])
# regression test for #3681
actual = concat([ds1.drop("x"), ds2.drop("x")], join="override", dim="y")
expected = Dataset(
{"a": (("x", "y"), np.array([0, 0], ndmin=2))}, coords={"y": [0, 0.0001]}
)
assert_identical(actual, expected)
def test_concat_combine_attrs_kwarg(self):
ds1 = Dataset({"a": ("x", [0])}, coords={"x": [0]}, attrs={"b": 42})
ds2 = Dataset({"a": ("x", [0])}, coords={"x": [1]}, attrs={"b": 42, "c": 43})
expected = {}
expected["drop"] = Dataset({"a": ("x", [0, 0])}, {"x": [0, 1]})
expected["no_conflicts"] = Dataset(
{"a": ("x", [0, 0])}, {"x": [0, 1]}, {"b": 42, "c": 43}
)
expected["override"] = Dataset({"a": ("x", [0, 0])}, {"x": [0, 1]}, {"b": 42})
with raises_regex(ValueError, "combine_attrs='identical'"):
actual = concat([ds1, ds2], dim="x", combine_attrs="identical")
with raises_regex(ValueError, "combine_attrs='no_conflicts'"):
ds3 = ds2.copy(deep=True)
ds3.attrs["b"] = 44
actual = concat([ds1, ds3], dim="x", combine_attrs="no_conflicts")
for combine_attrs in expected:
actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
assert_identical(actual, expected[combine_attrs])
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1]})
assert_identical(actual, expected)
objs = [Dataset({"x": [0]}), Dataset({}, {"x": 1})]
actual = concat(objs, "x")
assert_identical(actual, expected)
# mixed dims between variables
objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})]
actual = concat(objs, "x")
expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])})
assert_identical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])})
assert_identical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])})
assert_identical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [
Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}),
Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}),
]
actual = concat(objs, "x")
expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]})
assert_identical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [
Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
Dataset({"y": ("t", [2])}, {"x": 1, "t": [0]}),
]
expected = Dataset({"y": ("t", [1, 2])}, {"x": 1, "t": [0, 0]})
actual = concat(objs, "t")
assert_identical(expected, actual)
objs = [
Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
Dataset({"y": ("t", [2])}, {"x": 2, "t": [0]}),
]
with pytest.raises(ValueError):
concat(objs, "t", coords="minimal")
def test_concat_dim_is_variable(self):
objs = [Dataset({"x": 0}), Dataset({"x": 1})]
coord = Variable("y", [3, 4])
expected = Dataset({"x": ("y", [0, 1]), "y": [3, 4]})
actual = concat(objs, coord)
assert_identical(actual, expected)
def test_concat_multiindex(self):
x = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]])
expected = Dataset({"x": x})
actual = concat(
[expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], "x"
)
assert expected.equals(actual)
assert isinstance(actual.x.to_index(), pd.MultiIndex)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}])
def test_concat_fill_value(self, fill_value):
datasets = [
Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1]), "x": [1, 2]}),
Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1]), "x": [0, 1]}),
]
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value_a = fill_value_b = np.nan
elif isinstance(fill_value, dict):
fill_value_a = fill_value["a"]
fill_value_b = fill_value["b"]
else:
fill_value_a = fill_value_b = fill_value
expected = Dataset(
{
"a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]),
"b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]),
},
{"x": [0, 1, 2]},
)
actual = concat(datasets, dim="t", fill_value=fill_value)
assert_identical(actual, expected)
class TestConcatDataArray:
def test_concat(self):
ds = Dataset(
{
"foo": (["x", "y"], np.random.random((2, 3))),
"bar": (["x", "y"], np.random.random((2, 3))),
},
{"x": [0, 1]},
)
foo = ds["foo"]
bar = ds["bar"]
# from dataset array:
expected = DataArray(
np.array([foo.values, bar.values]),
dims=["w", "x", "y"],
coords={"x": [0, 1]},
)
actual = concat([foo, bar], "w")
assert_equal(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby("x")]
stacked = concat(grouped, ds["x"])
assert_identical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes["x"])
assert_identical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
expected = foo[:2].rename({"x": "concat_dim"})
assert_identical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({"x": "concat_dim"})
assert_identical(expected, actual)
with raises_regex(ValueError, "not identical"):
concat([foo, bar], dim="w", compat="identical")
with raises_regex(ValueError, "not a valid argument"):
concat([foo, bar], dim="w", data_vars="minimal")
def test_concat_encoding(self):
# Regression test for GH1297
ds = Dataset(
{
"foo": (["x", "y"], np.random.random((2, 3))),
"bar": (["x", "y"], np.random.random((2, 3))),
},
{"x": [0, 1]},
)
foo = ds["foo"]
foo.encoding = {"complevel": 5}
ds.encoding = {"unlimited_dims": "x"}
assert concat([foo, foo], dim="x").encoding == foo.encoding
assert concat([ds, ds], dim="x").encoding == ds.encoding
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [
DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3), dims=["x", "y"]
)
for _ in range(2)
]
# should not raise
combined = concat(arrays, dim="z")
assert combined.shape == (2, 3, 3)
assert combined.dims == ("z", "x", "y")
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_concat_fill_value(self, fill_value):
foo = DataArray([1, 2], coords=[("x", [1, 2])])
bar = DataArray([1, 2], coords=[("x", [1, 3])])
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value = np.nan
expected = DataArray(
[[1, 2, fill_value], [1, fill_value, 2]],
dims=["y", "x"],
coords={"x": [1, 2, 3]},
)
actual = concat((foo, bar), dim="y", fill_value=fill_value)
assert_identical(actual, expected)
def test_concat_join_kwarg(self):
ds1 = Dataset(
{"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]}
).to_array()
ds2 = Dataset(
{"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]}
).to_array()
expected = {}
expected["outer"] = Dataset(
{"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])},
{"x": [0, 1], "y": [0, 0.0001]},
)
expected["inner"] = Dataset(
{"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []}
)
expected["left"] = Dataset(
{"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
expected["right"] = Dataset(
{"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0.0001]},
)
expected["override"] = Dataset(
{"a": (("x", "y"), np.array([0, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
with raises_regex(ValueError, "indexes along dimension 'y'"):
actual = concat([ds1, ds2], join="exact", dim="x")
for join in expected:
actual = concat([ds1, ds2], join=join, dim="x")
assert_equal(actual, expected[join].to_array())
def test_concat_combine_attrs_kwarg(self):
da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42})
da2 = DataArray([0], coords=[("x", [1])], attrs={"b": 42, "c": 43})
expected = {}
expected["drop"] = DataArray([0, 0], coords=[("x", [0, 1])])
expected["no_conflicts"] = DataArray(
[0, 0], coords=[("x", [0, 1])], attrs={"b": 42, "c": 43}
)
expected["override"] = DataArray(
[0, 0], coords=[("x", [0, 1])], attrs={"b": 42}
)
with raises_regex(ValueError, "combine_attrs='identical'"):
actual = concat([da1, da2], dim="x", combine_attrs="identical")
with raises_regex(ValueError, "combine_attrs='no_conflicts'"):
da3 = da2.copy(deep=True)
da3.attrs["b"] = 44
actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts")
for combine_attrs in expected:
actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs)
assert_identical(actual, expected[combine_attrs])
@pytest.mark.parametrize("attr1", ({"a": {"meta": [10, 20, 30]}}, {"a": [1, 2, 3]}, {}))
@pytest.mark.parametrize("attr2", ({"a": [1, 2, 3]}, {}))
def test_concat_attrs_first_variable(attr1, attr2):
arrs = [
DataArray([[1], [2]], dims=["x", "y"], attrs=attr1),
DataArray([[3], [4]], dims=["x", "y"], attrs=attr2),
]
concat_attrs = concat(arrs, "y").attrs
assert concat_attrs == attr1
def test_concat_merge_single_non_dim_coord():
da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
expected = DataArray(range(1, 7), dims="x", coords={"x": range(1, 7), "y": 1})
for coords in ["different", "minimal"]:
actual = concat([da1, da2], "x", coords=coords)
assert_identical(actual, expected)
with raises_regex(ValueError, "'y' is not present in all datasets."):
concat([da1, da2], dim="x", coords="all")
da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
da3 = DataArray([7, 8, 9], dims="x", coords={"x": [7, 8, 9], "y": 1})
for coords in ["different", "all"]:
with raises_regex(ValueError, "'y' not present in all datasets"):
concat([da1, da2, da3], dim="x")
| apache-2.0 |
xya/sms-tools | lectures/04-STFT/plots-code/window-size.py | 22 | 1498 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 128
start = int(.81*fs)
x1 = x[start:start+N]
plt.figure(1, figsize=(9.5, 6))
plt.subplot(321)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x1*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x1, M = 128')
mX, pX = DF.dftAnal(x1, np.hamming(N), N)
plt.subplot(323)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX1')
plt.subplot(325)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX1')
N = 1024
start = int(.81*fs)
x2 = x[start:start+N]
mX, pX = DF.dftAnal(x2, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x2*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x2), max(x2)])
plt.title('x2, M = 1024')
plt.subplot(324)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX2')
plt.subplot(326)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX2')
plt.tight_layout()
plt.savefig('window-size.png')
plt.show()
| agpl-3.0 |
pdehaye/theming-edx-platform | docs/developers/source/conf.py | 15 | 6200 | # -*- coding: utf-8 -*-
#pylint: disable=C0103
#pylint: disable=W0622
#pylint: disable=W0212
#pylint: disable=W0613
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path.append('source/_static')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../..'))
root = os.path.abspath('../../..')
sys.path.append(root)
sys.path.append(os.path.join(root, "common/djangoapps"))
sys.path.append(os.path.join(root, "common/lib"))
sys.path.append(os.path.join(root, "common/lib/sandbox-packages"))
sys.path.append(os.path.join(root, "lms/djangoapps"))
sys.path.append(os.path.join(root, "lms/lib"))
sys.path.append(os.path.join(root, "cms/djangoapps"))
sys.path.append(os.path.join(root, "cms/lib"))
sys.path.insert(0, os.path.abspath(os.path.normpath(os.path.dirname(__file__)
+ '/../../')))
sys.path.append('.')
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
import mock
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
'numpy',
'matplotlib',
'matplotlib.pyplot',
'scipy.interpolate',
'scipy.constants',
'scipy.optimize',
]
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), encoding, errors)
elif not isinstance(s, unicode):
s = unicode(s, encoding, errors)
return s
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def process_docstring(app, what, name, obj, options, lines):
"""Autodoc django models"""
# This causes import errors if left outside the function
from django.db import models
# If you want extract docs from django forms:
# from django import forms
# from django.forms.models import BaseInlineFormSet
# Only look at objects that inherit from Django's base MODEL class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
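# Illustrative example (hypothetical model field): a field
# `title = models.CharField(help_text="Course title")` would make the loop above append
# the lines ':param title: Course title' and ':type title: CharField' to the docstring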
def setup(app):
"""Setup docsting processors"""
#Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| agpl-3.0 |
MiroK/lega | sandbox/bendpy/sine_ddu/helmholtz_1d.py | 1 | 2760 | #
# Solve -u`` + k*u = f in (0, pi) with u(0) = u(pi) = 0 [1]
#
from __future__ import division
from sympy import Symbol
from lega.sine_basis import mass_matrix, stiffness_matrix, load_vector
from lega.sine_basis import sine_eval, sine_fft
import scipy.sparse.linalg as la
from math import pi as Pi
import numpy as np
def get_rhs(u, k):
'''
Verify that u satisfies boundary conditions and compute the right hand
side f.
'''
x = Symbol('x')
assert abs(u.subs(x, 0)) < 1E-15 and abs(u.subs(x, Pi)) < 1E-15
# Right hand side if u is to be the solution
f = -u.diff(x, 2) + k*u
return f
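# Worked example: for u = sin(x) (which vanishes at 0 and pi) the returned
# right hand side is f = -u'' + k*u = (1 + k)*sin(x)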
def solve_helmholtz_1d(f, k, n):
'''Solve the Helmholtz problem with the first n Fourier sine functions.'''
A = stiffness_matrix(n)
M = mass_matrix(n)
# The lhs matrix of the Helmholtz linear system is
AA = (A + k*M)
# Try to see how big of an error we make when computing rhs with fft
# Integrated
b = load_vector(f, n)
# Try some frequency
# F = sine_eval(8192, f)
# bb = sine_fft(F)[:n]
# print 2**n, '>>>', np.linalg.norm(b-bb)
# The system is (A + k*M)*U = b
U = la.spsolve(AA, b)
# Note that U is a vector of expansion coeffs of the solution w.r.t.
# the sine basis
return U
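# Note (an assumption about lega's convention: the sine basis consists of sin(j*x),
# j = 1..n, possibly up to a normalization constant): both A and M are then diagonal
# with A_jj = j**2 * M_jj, so the sparse solve above reduces to dividing each load
# vector entry by (j**2 + k) * M_jj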
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from sympy import cos, pi, lambdify
from lega.sine_basis import sine_function
from sympy.plotting import plot
from sympy.mpmath import quad
from math import sqrt, log as ln
import matplotlib.pyplot as plt
# Setup
x = Symbol('x')
u = x*(x-pi)*cos(2*pi*x)
k = 1
f = get_rhs(u, k)
n_max = 30
n = 2
tol = 1E-14
converged = False
ns = []
errors = []
while not converged:
U = solve_helmholtz_1d(f, k, n)  # expansion coeffs w.r.t. the sine basis
# Error using symbolic functions
uh = sine_function(U)
# Want L2 norm of the error
e = u - uh
error = sqrt(quad(lambdify(x, e**2), [0, Pi]))
# Error by FFT
Evec = sine_eval(f=e, N=2**16)
e_k = sine_fft(Evec)
# Use parseval
error_ = sqrt(np.sum(e_k**2))
if n != 2:
ns.append(n)
errors.append(error)
rate = ln(error/error0)/ln(n0/n)
print 'n=%d, |e|_2=%.4E(%.2f) {e}_2=%.4E' % (n, error, rate, error_)
converged = error < tol or n >= n_max
error0, n0 = error, n
n += 1
plt.figure()
plt.loglog(ns, errors)
# Plot the final numerical one againt analytical
p0 = plot(u, (x, 0, Pi), show=False)
p1 = plot(uh, (x, 0, Pi), show=False)
p1[0].line_color='red'
p0.append(p1[0])
p0.show()
| mit |
smartscheduling/scikit-learn-categorical-tree | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
AFriemann/LowCarb | thirdparty/boost_1_59_0/libs/numeric/odeint/performance/plot_result.py | 43 | 2225 | """
Copyright 2011-2014 Mario Mulansky
Copyright 2011-2014 Karsten Ahnert
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
"""
import numpy as np
from matplotlib import pyplot as plt
plt.rc("font", size=16)
def get_runtime_from_file(filename):
gcc_perf_file = open(filename, 'r')
for line in gcc_perf_file:
if "Minimal Runtime:" in line:
return float(line.split(":")[-1])
t_gcc = [get_runtime_from_file("perf_workbook/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_gcc.perf")]
t_intel = [get_runtime_from_file("perf_workbook/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_intel.perf")]
t_gfort = [get_runtime_from_file("perf_workbook/rk4_gfort.perf"),
get_runtime_from_file("perf_ariel/rk4_gfort.perf"),
get_runtime_from_file("perf_lyra/rk4_gfort.perf")]
t_c_intel = [get_runtime_from_file("perf_workbook/rk4_c_intel.perf"),
get_runtime_from_file("perf_ariel/rk4_c_intel.perf"),
get_runtime_from_file("perf_lyra/rk4_c_intel.perf")]
print t_c_intel
ind = np.arange(3) # the x locations for the groups
width = 0.15 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, t_gcc, width, color='b', label="odeint gcc")
rects2 = ax.bar(ind+width, t_intel, width, color='g', label="odeint intel")
rects3 = ax.bar(ind+2*width, t_c_intel, width, color='y', label="C intel")
rects4 = ax.bar(ind+3*width, t_gfort, width, color='c', label="gfort")
ax.axis([-width, 2.0+5*width, 0.0, 0.85])
ax.set_ylabel('Runtime (s)')
ax.set_title('Performance for integrating the Lorenz system')
ax.set_xticks(ind + 1.5*width)
ax.set_xticklabels(('Core i5-3210M\n3.1 GHz',
'Xeon E5-2690\n3.8 GHz',
'Opteron 8431\n 2.4 GHz'))
ax.legend(loc='upper left', prop={'size': 16})
plt.savefig("perf.pdf")
plt.savefig("perf.png", dpi=50)
plt.show()
| mit |
FRidh/ism | ism/ism.py | 1 | 21554 | """
This module contains an implementation of the Image Source Method (ISM).
"""
from heapq import nlargest
from geometry import Point, Plane, Polygon
from ._ism import Wall, Mirror, is_shadowed, test_effectiveness
import logging
from cytoolz import unique, count
import numpy as np
# To render the geometry
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Patch3DCollection
from ._tools import Arrow3D
from matplotlib import animation
def amount_of_sources(order, walls):
"""The amount of potential sources :math:`N` up to a certain order :math:`o` for a given amount of walls :math:`w`.
:param order: Order threshold :math:`o`.
:param walls: Amount of walls :math:`w`.
:rtype: int
The amount of potential sources :math:`N` is given by
.. math:: N = 1 + \\sum_{i=1}^{o} w \\left( w-1 \\right)^{i-1}
"""
return 1 + sum((walls*(walls-1)**(o-1) for o in range(1, order+1)))
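# Quick sanity check of the formula above (illustrative values, not part of the
# original module), e.g. for a closed box with six walls:
#
#   >>> amount_of_sources(order=1, walls=6)
#   7
#   >>> amount_of_sources(order=2, walls=6)
#   37
#   >>> amount_of_sources(order=3, walls=6)
#   187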
class Model(object):
"""The `Model` is the main class used for determining mirror sources and their effectiveness.
This implementation requires a fixed source position. The receiver position can vary.
"""
def __init__(self, walls, source, receiver, max_order=3):#, max_distance=1000.0, min_amplitude=0.01):
self.walls = walls
"""Walls
"""
self.source = source
"""Source position. Requires a list of points.
The source cannot move.
##Required is an instance of :class:`geometry.Point`
"""
self.receiver = receiver
"""Receiver position. Requires a list of points.
The receiver can move.
##Required is a list of instances of :class:`geometry.Point`
"""
self.max_order = max_order
"""Order threshold. Highest order to include.
"""
@property
def source(self):
return self._source
@source.setter
def source(self, x):
if isinstance(x, list):
self._source = x
elif(isinstance(x, np.ndarray)):
self._source = [Point(*row) for row in x]
else:
raise ValueError("List of Point instances are required.")
@property
def receiver(self):
return self._receiver
@receiver.setter
def receiver(self, x):
if isinstance(x, list):
self._receiver = x
elif(isinstance(x, np.ndarray)):
self._receiver = [Point(*row) for row in x]
else:
raise ValueError("List of Point instances are required.")
@property
def is_source_moving(self):
return count(unique(self.source, key=tuple)) != 1
@property
def is_receiver_moving(self):
return count(unique(self.receiver, key=tuple)) != 1
def mirrors(self):
"""Mirrors.
Determine the mirrors of the non-moving source. Whether the mirrors are effective can be obtained using :meth:`determine`.
Determining the mirrors requires a receiver position; the first receiver location is used.
"""
if not self.walls:
raise ValueError("ISM cannot run without any walls.")
yield from ism(self.walls, self.source[0], self.receiver[0], self.max_order)
def _determine(self, mirrors):
"""Determine mirror source effectiveness and strength.
"""
#r = 1 if isinstance(self.receiver, Point) else len(self.receiver)
n_positions = len(self.receiver)
n_frequencies = len(self.walls[0].impedance)
#amount_of_receiver_positions = r
while True:
mirror = next(mirrors)
mirror.effective = np.empty(n_positions, dtype='int32')#, dtype='bool')
mirror.distance = np.empty(n_positions, dtype='float64')
mirror.strength = np.ones((n_positions, n_frequencies), dtype='complex128')
for t in range(n_positions):
if mirror.mother is not None:
mother_strength = mirror.mother.strength[t]
else:
mother_strength = np.ones((n_frequencies), dtype='complex128')
mirror.effective[t], mirror.strength[t], mirror.distance[t] = test_effectiveness(self.walls,
self.source[0],
self.receiver[t],
mirror.position,
mirror.wall,
mother_strength)
yield mirror
@staticmethod
def _strongest(mirrors, amount):
"""Determine strongest mirror sources.
:param mirrors: Iterator with mirrors.
:param amount: Amount of mirrors to keep.
:returns: Generator yielding sorted values.
"""
yield from nlargest(amount, mirrors, key=lambda x:x.strength.max())
def determine(self, strongest=None):
"""Determine.
"""
if not self.walls:
raise ValueError("ISM cannot run without any walls.")
#self.determine_mirrors()
logging.info("determine: Determining mirror sources.")
mirrors = self.mirrors()
logging.info("determine: Determining mirror sources strength and effectiveness.")
mirrors = self._determine(mirrors)
if strongest:
logging.info("determine: Determining strongest mirror sources.")
mirrors = self._strongest(mirrors, strongest)
yield from mirrors
def plot(self, **kwargs):
return plot_model(self, **kwargs)
def plot_walls(self, filename=None):
"""
Render the walls. See :func:`plot_walls`.
"""
return plot_walls(self.walls, filename)
def strongest(mirrors, n):
"""Keep the `n` strongest mirrors.
:param mirrors: Mirrors
:param n: Amount of mirrors to keep.
"""
yield from nlargest(n, mirrors, key=lambda x: x.strength.max())
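# Illustrative sketch of how the classes above are typically driven (added for
# clarity; building the walls is assumed to happen elsewhere, since Wall comes from
# the compiled ._ism extension and its constructor is not shown here):
#
#   model = Model(walls, source=[Point(0.0, 0.0, 1.5)],
#                 receiver=[Point(10.0, 0.0, 1.5)], max_order=2)
#   for mirror in model.determine(strongest=10):
#       print(mirror.order, mirror.effective, mirror.strength.shape)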
def ism(walls, source_position, receiver_position, max_order=3):
"""Image source method.
:param walls: List of walls
:param source_position: Position of the source.
:param receiver_position: Position of the receiver.
:param max_order: Maximum order to determine image sources for.
"""
logging.info("Start calculating image sources.")
n_walls = len(walls)
source_receiver_distance = source_position.distance_to(receiver_position)
mirrors = list()
"""List of lists with mirror sources where ``mirrors[order]`` is a list of mirror sources of order ``order``"""
"""Step 3: Include the original source."""
"""Test first whether there is a direct path."""
logging.info("Main source effective: {}".format(not is_shadowed(source_position, receiver_position, walls)))
#mirrors.append([Mirror(source_position,
#None,
#None,
#0,
#source_position.distance_to(receiver_position),
#np.ones_like(walls[0].impedance),
#not is_shadowed(source_position, receiver_position, walls)
#)])
mirrors.append([Mirror(source_position, mother=None, wall=None, order=0)])
"""Step 4: Loop over orders."""
for order in range(1, max_order+1):
mirrors.append(list()) # Add per order a list that will contain mirror sources of that order
"""Step 5: Loop over sources of this order."""
for m, mirror in enumerate(mirrors[order-1], start=1):
"""Step 6: Loop over walls."""
for wall in walls:
info_string = "Order: {} - Mirror: {} - Wall: {}".format(order, m, wall)
"""Step 7: Several geometrical truncations.
We won't consider a mirror source when..."""
if wall == mirror.wall:
logging.info(info_string + " - Illegal- Generating wall of this mirror.")
continue # ...the (mirror) source one order lower is already at this position.
if mirror.position.on_interior_side_of(wall.plane()) == -1:
logging.info(info_string + " - Illegal - Mirror on wrong side of wall. Position: {}".format(mirror.position) )
continue #...the (mirror) source is on the other side of the wall.
if mirror.wall: # Should be mirrored at a wall. This is basically only an issue with zeroth order?
#print ('Order: {}'.format(str(order)))
#print ('Wall center: {}'.format(str(wall.center)))
#print ('Wall plane: {}'.format(str(wall)))
#print ('Mirror plane: {}'.format(str(mirror.wall)))
#print ('Mirror position: {}'.format(str(mirror.position)))
if not wall.center.in_field_angle(mirror.position, mirror.wall, wall.plane()):
#if is_point_in_field_angle(mirror.position, wall.center, mirror.wall, wall) == -1:
logging.info(info_string + " - Illegal - Center of wall cannot be seen.")
continue #...the center of the wall is not visible from the (mirror) source.
#else:
"""Step 8: Evaluate new mirror source and its parameters."""
position = mirror.position.mirror_with(wall.plane()) # Position of the new source
logging.info(info_string + " - Storing mirror.")
mirrors[order].append(Mirror(position, mirror, wall, order))
#position_receiver_distance = position.distance_to(receiver_position) # Distance between receiver and the new source
#cos_angle = wall.plane().normal().cosines_with(position.cosines_with(receiver_position)) # Cosine of the angle between the line of sight and the wall normal.
#try:
#refl = (wall.impedance*cos_angle - 1.0) / (wall.impedance*cos_angle + 1.0) # Reflection coefficient
#except ZeroDivisionError: # If angle of incidence is 90 degrees, then cos_angle is 0.0. With hard reflection this results in a division by zero.
#refl = 1.0
#strength = mirror.strength * refl # Amplitude strength due to current and past reflections
#print("Refl {}".format(refl))
"""Step 9: Truncation for weak q."""
###"""Check if q is not too weak."""
###if np.all(strength < min_amplitude):
###logging.info(info_string + " - Source is too weak: {}".format(strength))
###continue
###"""Check if q not too far away."""
###if (position_receiver_distance / source_receiver_distance) > max_distance:
###logging.info(info_string + " - Source is too far away: {} > {}".format(position_receiver_distance / source_receiver_distance, max_distance))
###continue
"""Check if q can be seen."""
"""We have to create a plane on which the receiver_position is situated."""
#effective = not is_shadowed(mirror.position, receiver_position, walls)
#logging.info(info_string + " - Mirrorsource: {} - Effective: {}".format(position, effective))
#mirrors[order].append(Mirror(position, mirror, wall, order, position_receiver_distance, strength, effective))
yield from (val for subl in mirrors for val in subl)
def children(mirrors, mirror):
"""Yield children of mirror.
"""
for m in mirrors:
if m.mother == mirror:
yield m
#def plot_model(model, receiver=0, positions=True, direct=False, intersections=True, filename=None):
#"""
#Render of the image source model.
#"""
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#receiver = model.receiver[receiver]
#ax.scatter(model.receiver.x, model.receiver.y, model.receiver.z, marker='p', c='g')
#mirrors = list(model.mirrors())
#for mirror in mirrors:
#while True:
## Position of mirror
#if positions:
#ax.scatter(mirror.position.x, mirror.position.y, mirror.position.z)
## Direct (though possibly not effective) path to receiver.
#if direct:
#ax.add_artist(Arrow3D.from_points(mirror.position,
#receiver,
#mutation_scale=20,
#lw=1,
#arrowstyle="-|>"
#))
#if mirror.mother is None:
#ax.add_artist(Arrow3D.from_points(mirror.position,
#receiver,
#mutation_scale=20,
#lw=1,
#arrowstyle="-|>"
#))
#break
#else:
#if intersections:
#intersection = mirror.wall.plane().intersection(mirror.mother.position, mirror.position)
#ax.scatter(intersection.x, intersection.y, intersection.z)
#ax.add_artist(Arrow3D.from_points(mirror.position,
#intersection,
#mutation_scale=20,
#lw=1,
#arrowstyle="-|>"
#))
#mirror = mirror.mother
#return fig
def plot_model(model, draw_source=True, draw_receiver=True, draw_mirrors=True, draw_walls=True):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', aspect='equal')
mirrors = list(model.mirrors())
if draw_receiver:
receiver = np.asarray(model.receiver).T
ax.scatter(receiver[0], receiver[1], receiver[2], marker='o', c='g')
del receiver
if draw_source:
source = np.asarray(model.source).T
ax.scatter(source[0], source[1], source[2], marker='x', c='r')
del source
if draw_mirrors:
_draw_mirrors(ax, mirrors)
if draw_walls:
_draw_walls(ax, model.walls)
return fig
def _draw_mirrors(ax, mirrors):
for mirror in mirrors:
if mirror.order != 0:
ax.scatter(mirror.position.x, mirror.position.y, mirror.position.z, marker='x', c='b')
return ax
def _draw_walls(ax, walls):
if not walls:
return ax
ARROW_LENGTH = 10.0
COLOR_FACES = (0.5, 0.5, 1.0)
polygons = Poly3DCollection( [wall.points for wall in walls], alpha=0.5 )
polygons.set_facecolor(COLOR_FACES)
#polygons.tri.set_edgecolor('k')
ax.add_collection3d( polygons )
#arrows = Patch3DCollection( [Arrow3D.from_points(wall.center, wall.center + (wall.plane().normal()*ARROW_LENGTH) ) for wall in walls ] )
#ax.add_collection3d(arrows)
for wall in walls:
ax.add_artist(Arrow3D.from_points((wall.center),
(wall.center + wall.plane().normal()*ARROW_LENGTH),
mutation_scale=20,
lw=1,
arrowstyle="-|>"))
#ax.relim() # Does not support Collections!!! So we have to manually set the view limits...
#ax.autoscale()#_view()
coordinates = np.array( [wall.points for wall in walls] ).reshape((-1,3))
minimum = coordinates.min(axis=0)
maximum = coordinates.max(axis=0)
ax.set_xlim(minimum[0] - ARROW_LENGTH, maximum[0] + ARROW_LENGTH)
ax.set_ylim(minimum[1] - ARROW_LENGTH, maximum[1] + ARROW_LENGTH)
ax.set_zlim(minimum[2] - ARROW_LENGTH, maximum[2] + ARROW_LENGTH)
ax.set_xlabel(r'$x$ in m')
ax.set_ylabel(r'$y$ in m')
ax.set_zlabel(r'$z$ in m')
return ax
def plot_walls(walls, filename=None):
"""
Render of the walls.
:param walls: Iterable of walls.
:param filename: Optional filename to write figure to.
:returns: figure if filename not specified else None
"""
ARROW_LENGTH = 10.0
COLOR_FACES = (0.5, 0.5, 1.0)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', aspect='equal')
#ax = fig.gca(projection='3d')
ax.set_aspect("equal")
polygons = Poly3DCollection( [wall.points for wall in walls], alpha=0.5 )
polygons.set_facecolor(COLOR_FACES)
#polygons.tri.set_edgecolor('k')
ax.add_collection3d( polygons )
#arrows = Patch3DCollection( [Arrow3D.from_points(wall.center, wall.center + (wall.plane().normal()*ARROW_LENGTH) ) for wall in walls ] )
#ax.add_collection3d(arrows)
for wall in walls:
ax.add_artist(Arrow3D.from_points((wall.center),
(wall.center + wall.plane().normal()*ARROW_LENGTH),
mutation_scale=20,
lw=1,
arrowstyle="-|>"))
#ax.relim() # Does not support Collections!!! So we have to manually set the view limits...
#ax.autoscale()#_view()
coordinates = np.array( [wall.points for wall in walls] ).reshape((-1,3))
minimum = coordinates.min(axis=0)
maximum = coordinates.max(axis=0)
ax.set_xlim(minimum[0] - ARROW_LENGTH, maximum[0] + ARROW_LENGTH)
ax.set_ylim(minimum[1] - ARROW_LENGTH, maximum[1] + ARROW_LENGTH)
ax.set_zlim(minimum[2] - ARROW_LENGTH, maximum[2] + ARROW_LENGTH)
ax.set_xlabel(r'$x$ in m')
ax.set_ylabel(r'$y$ in m')
ax.set_zlabel(r'$z$ in m')
if filename:
fig.savefig(filename)
else:
return fig
###class AnimatedScatter(object):
###"""An animated scatter plot using matplotlib.animations.FuncAnimation."""
###def __init__(self, data, numpoints=50):
###self.numpoints = numpoints
###self.data = data
#### Setup the figure and axes...
###self.fig, self.ax = plt.subplots()
#### Then setup FuncAnimation.
###self.ani = animation.FuncAnimation(self.fig, self.update, interval=5,
###init_func=self.setup_plot, blit=True)
###def setup_plot(self):
###"""Initial drawing of the scatter plot."""
###x, y, s, c = next(self.stream)
###self.scat = self.ax.scatter(x, y, c=c, s=s, animated=True)
###self.ax.axis([-10, 10, -10, 10])
#### For FuncAnimation's sake, we need to return the artist we'll be using
#### Note that it expects a sequence of artists, thus the trailing comma.
###return self.scat,
###def update(self, i):
###"""Update the scatter plot."""
###data = next(self.data)
#### Set x and y data...
###self.scat.set_offsets(data[:2, :])
#### Set sizes...
###self.scat._sizes = 300 * abs(data[2])**1.5 + 100
#### Set colors..
###self.scat.set_array(data[3])
#### We need to return the updated artist for FuncAnimation to draw..
#### Note that it expects a sequence of artists, thus the trailing comma.
###return self.scat,
###def show(self):
###plt.show()
###def animate_model(model):
###def _init():
###dp.set_data([], [], [])
###def _animate(i):
###receiver = np.asarray(model.receiver).T
###dp.set_data(receiver[0], receiver[1], receiver[2], marker='o', c='g')
###return dp
###fig = plt.figure()
###ax = fig.add_subplot(111, projection='3d', aspect='equal')
###dp = ax.scatter([], [], [])# lw=2)
###animation.FuncAnimation(fig, _animate, frames=model.determine(), init_func=_init)#, blit=True)
###x = np.linspace(0, 10, 1000)
###def init():
###line.set_data([], [])
###return line,
###def animate(i):
###line.set_data(x, np.cos(i * 0.02 * np.pi) * np.sin(x - i * 0.02 * np.pi))
###return line,
| bsd-3-clause |
ishank08/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 127 | 25365 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/datasets/stackloss/data.py | 25 | 1907 | """Stack loss data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain. """
TITLE = __doc__
SOURCE = """
Brownlee, K. A. (1965), "Statistical Theory and Methodology in
Science and Engineering", 2nd edition, New York:Wiley.
"""
DESCRSHORT = """Stack loss plant data of Brownlee (1965)"""
DESCRLONG = """The stack loss plant data of Brownlee (1965) contains
21 days of measurements from a plant's oxidation of ammonia to nitric acid.
The nitric oxide pollutants are captured in an absorption tower."""
NOTE = """::
Number of Observations - 21
Number of Variables - 4
Variable name definitions::
STACKLOSS - 10 times the percentage of ammonia going into the plant
that escapes from the absorption column
AIRFLOW - Rate of operation of the plant
WATERTEMP - Cooling water temperature in the absorption tower
ACIDCONC - Acid concentration of circulating acid, expressed as (concentration - 50) times 10.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the stack loss data and returns a Dataset class instance.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the stack loss data and returns a Dataset class instance.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/stackloss.csv',"rb"), delimiter=",",
names=True, dtype=float)
return data
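# Minimal usage sketch (illustrative, not part of the original module); the attribute
# names follow the usual statsmodels Dataset convention with the first column as the
# endogenous variable:
#
#   >>> from statsmodels.datasets import stackloss
#   >>> data = stackloss.load_pandas()
#   >>> data.endog.name, list(data.exog.columns)
#   ('STACKLOSS', ['AIRFLOW', 'WATERTEMP', 'ACIDCONC'])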
| bsd-3-clause |
kishwarshafin/deePore | src/modules/dataset.py | 1 | 1614 | import os
from PIL import Image, ImageOps
import numpy as np
import torch
import pandas as pd
from torch.utils.data import Dataset
from torchvision import transforms, utils
from sklearn.preprocessing import MultiLabelBinarizer
from torch.autograd import Variable
class TextColor:
"""
Defines color codes for text used to give different mode of errors.
"""
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
class PileupDataset(Dataset):
"""
Arguments:
A CSV file path
"""
def __init__(self, csv_path, transform=None):
tmp_df = pd.read_csv(csv_path, header=None)
assert tmp_df[0].apply(lambda x: os.path.isfile(x)).all(), \
"Some images referenced in the CSV file were not found"
self.mlb = MultiLabelBinarizer()
self.transform = transform
self.X_train = tmp_df[0]
label_lists = []
for label in tmp_df[1]:
label_list = [int(x) for x in str(label)]
label_lists.append(np.array(label_list, dtype=np.int))
self.y_train = np.array(label_lists)
def __getitem__(self, index):
img = Image.open(self.X_train[index])
# img = ImageOps.grayscale(img)  # uncomment to read grayscale bmp files
if self.transform is not None:
img = self.transform(img)
label = torch.from_numpy(self.y_train[index])
return img, label
def __len__(self):
return len(self.X_train.index) | mit |
wenhuchen/ETHZ-Bootstrapped-Captioning | visual-concepts/eval.py | 1 | 11962 | from __future__ import division
from _init_paths import *
import os
import os.path as osp
import sg_utils as utils
import numpy as np
import skimage.io
import skimage.transform
import h5py
import pickle
import json
import math
import argparse
import time
import cv2
from collections import Counter
from json import encoder
"""
import matplotlib
matplotlib.use("Qt4Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
"""
encoder.FLOAT_REPR = lambda o: format(o, '.2f')
mean = np.array([[[ 103.939, 116.779, 123.68]]])
functional_words = ['a', 'on', 'of', 'the', 'in', 'with', 'and', 'is', 'to', 'an', 'two', 'at', 'next', 'are', 'it']
def scaleandtranspose(im, base_image_size):
# Subtract the ILSVRC (ImageNet) mean value
new_im = im - mean
# Upsample the image and swap the axes to Color x height x width
new_im = upsample_image(new_im, base_image_size, square=True)
return new_im.transpose((2,0,1))
def BGR2RGB(img):
assert img.shape[2] == 3
new_img = img.copy()
new_img[:, :, [0, 1, 2]] = img[:, :, [2, 1, 0]]
return new_img
def clip(lo, x, hi):
return lo if x <= lo else hi if x >= hi else x
def data_crop(im, boxes):
# Crop the image to each bounding box and draw the boxes on a copy of the image
H, W = im.shape[0], im.shape[1]
bbox_img = im.copy()
crop_list = []
for box in boxes:
# Careful that the order is height precede width
leftup_x = clip(0, box[0], W)
leftup_y = clip(0, box[1], H)
rightbot_x = clip(0, box[0] + box[2], W)
rightbot_y = clip(0, box[1] + box[3], H)
crop_list.append(im[leftup_y:rightbot_y, leftup_x:rightbot_x, :])
cv2.rectangle(bbox_img, (leftup_x, leftup_y), (rightbot_x, rightbot_y), (0, 255, 0), 2)
return crop_list, bbox_img
def upsample_image(im, upsample_size, square=False):
h, w = im.shape[0], im.shape[1]
s = max(h, w)
if square:
I_out = np.zeros((upsample_size, upsample_size, 3), dtype=np.float)
else:
new_h = math.ceil(h/w * upsample_size) if w>=h else upsample_size
new_w = math.ceil(w/h * upsample_size) if h>=w else upsample_size
I_out = np.zeros((new_h, new_w, 3), dtype=np.float)
im = cv2.resize(im, None, None, fx = upsample_size/s, fy = upsample_size/s, interpolation=cv2.INTER_CUBIC)
I_out[:im.shape[0], :im.shape[1], :] = im
return I_out
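# Example of the resizing behaviour above (illustrative numbers): a 480x640 input with
# upsample_size=565 becomes a 565x565 zero-padded canvas when square=True, or a
# 424x565 canvas when square=False (long edge scaled to 565, short edge scaled
# proportionally, remainder zero-padded), with the resized image in the top-left corner.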
def filter_out(concepts):
rank = Counter()
for concept in concepts:
rank.update(concept)
words = map(lambda arg: arg[0], rank.most_common(20))
return words
class DataLoader(object):
def __init__(self, coco_h5, coco_json):
self.h5 = h5py.File(coco_h5)
self.label_start_ix = self.h5['label_start_ix']
self.label_end_ix = self.h5['label_end_ix']
self.json_image = json.load(open(coco_json))['images']
self.image_num = len(json.load(open(coco_json))['images'])
self.ix_to_word = json.load(open(coco_json))['ix_to_word']
self.split_ix = {}
self.seq_length = 16
self.iterator = {}
for i, info in enumerate(self.json_image):
if info['split'] not in self.split_ix:
self.split_ix[info['split']] = [i]
else:
self.split_ix[info['split']].append(i)
self.reset_iterator()
def get_image_num(self, split):
if split == 'train':
return self.image_num - 10000
else:
return 5000
def reset_iterator(self):
for k in self.split_ix.keys():
self.iterator[k] = 0
def get_batch(self, split, batch_size=1, seq_per_img=5, seq_length=16):
images = np.zeros((batch_size, 256, 256, 3))
seq = np.zeros((seq_per_img, seq_length))
split_ix = self.split_ix[split]
max_ix = self.h5['labels'].shape[0]
max_index = len(split_ix)
wrapped = False
info = []
for i in range(batch_size):
ri = self.iterator[split]
ri_next = ri + 1
if ri_next >= max_index:
ri_next = 0
wrapped = True
self.iterator[split] = ri_next
ix = split_ix[ri]
ix1 = self.h5['label_start_ix'][ix]
ix2 = self.h5['label_end_ix'][ix]
ncaps = ix2 - ix1 + 1
assert ncaps > 0
if ncaps >= seq_per_img:
rand_ix = np.random.choice(range(ix1, ix2+1), seq_per_img, replace=False)
else:
rand_ix = np.random.choice(range(ix1, ix2+1), seq_per_img, replace=True)
for j, j_ix in enumerate(rand_ix):
if j_ix >= max_ix:
seq[j] = self.h5['labels'][-1, :seq_length]
else:
seq[j] = self.h5['labels'][j_ix, :seq_length]
im = self.h5['images'][ix].astype(np.float32)
images[i] = np.transpose(im, axes = (1, 2, 0))
info.append({'id': self.json_image[ix]['id'], 'file_path': self.json_image[ix]['file_path']})
return images, seq, info, wrapped
class TestModel(object):
def __init__(self, vocab_file):
# Set threshold_metric_name and output_metric_name
self.base_image_size = 565
self.vocab = utils.load_variables(vocab_file)
self.is_functional = np.array([x not in functional_words for x in self.vocab['words']])
self.threshold = 0.5
def load(self, prototxt_deploy, model_file):
self.net = caffe.Net(prototxt_deploy, model_file, caffe.TEST)
def forward(self, im, order):
# The images passed in are expected in BGR order, shaped batch x channel x height x width
self.net.forward(data=im)
# Retrieve the mil probability of the word
mil_probs = self.net.blobs['mil'].data
mil_probs = mil_probs.reshape((mil_probs.shape[0], mil_probs.shape[1]))
top_ind = np.argsort(-mil_probs, axis=-1)[:, :order + len(functional_words)]
# If not for regional features, just return the distribution
if order == 1000:
return self.net.blobs['mil'].data
# Retrieve the sigmoid data from the sigmoid layer
fc8_conv_probs = self.net.blobs['fc8-conv-sigmoid'].data
fc8_conv = fc8_conv_probs.reshape((fc8_conv_probs.shape[0], fc8_conv_probs.shape[1], -1))
fc8_conv_arg = fc8_conv.argmax(axis=-1)
# Retrieve the corresponding feature maps
feat_map = self.net.blobs['fc7-conv'].data
concepts, prob = [], []
att_feat = np.zeros((feat_map.shape[0], order, feat_map.shape[1]), dtype='float32')
feat_probs = np.zeros((feat_map.shape[0], order, 12, 12), dtype='float32')
# Loop over all the sorted indexes
indexes = []
for i in range(top_ind.shape[0]):
tmp_concepts = []
for j in range(top_ind.shape[1]):
word_idx = top_ind[i, j]
prob_map = fc8_conv_probs[i, word_idx, :, :]
index = fc8_conv_arg[i, word_idx]
word = self.vocab['words'][word_idx]
if word not in functional_words:
if index not in indexes:
i1, i2 = divmod(index, 12)
att_feat[i, len(indexes)] = feat_map[i,:,i1,i2]
indexes.append(index)
feat_probs[i, len(tmp_concepts)] = prob_map
tmp_concepts.append(word)
if len(tmp_concepts) >= order:
break
concepts.append(tmp_concepts)
prob.append(mil_probs[i, top_ind[i]].tolist())
return concepts, prob, att_feat, feat_probs
if __name__ == "__main__":
parser = argparse.ArgumentParser("run visual concept extraction")
parser.add_argument("--test_json", type=str, required=True, help="test image json")
parser.add_argument("--dataset", type=str, required=True, help="the dataset to use")
parser.add_argument("--split", type=str, required=True, help="Choose a split to evaluate")
parser.add_argument("--order", type=int, default=20, help="test image json")
parser.add_argument("--gpuid", type=int, default=0, help="GPU id to run")
parser.add_argument("--salient_grt", type=str, default='../Data/coco/salient_grt.json', help="Groundtruth")
parser.add_argument("--batch_size", type=int, default=1, help="Verbose the results")
parser.add_argument("--verbose", action='store_true', help="Verbose the results")
args = parser.parse_args()
# Caffe setting
caffe.set_mode_gpu()
caffe.set_device(args.gpuid)
prototxt = 'output/vgg/mil_finetune.prototxt.deploy'
model_file = 'output/vgg/snapshot_iter_240000.caffemodel'
vocab_file = 'vocabs/vocab_train.pkl'
basedir = '../Data/%s'%args.dataset
prefix = coco_image_base if args.dataset == 'coco' else flickr_image_base
#prototxt = '/home/thes0193/code/output/v2/mil_finetune.prototxt.deploy'
#model_file = '/home/thes0193/code/output/v2/snapshot_iter_240000.caffemodel'
#vocab_file = '/home/thes0193/code/vocabs/new_train_vocab.pkl'
# Load the model
model = TestModel(vocab_file)
with open(args.salient_grt) as f:
salient_grt_map = {item['id']:item['words'] for item in json.load(f)}
model.load(prototxt, model_file)
    # Open the h5 file: create it if it does not exist, otherwise just load it
image_f = json.load(open(args.test_json))
result, prec_set = [], []
h5_name = osp.join(basedir, 'Feats_%s.h5'%(args.split))
if osp.exists(h5_name):
h5_f = h5py.File(h5_name, 'r+')
else:
h5_f = h5py.File(h5_name, 'w')
if 'regional_feats' not in h5_f.keys():
feats = h5_f.create_dataset('regional_feats', shape=(len(image_f), args.order*2048), dtype='float32')
else:
feats = h5_f['/regional_feats']
    # Start generating results, i.e. visual concepts and regional features
for start in range(0, len(image_f), args.batch_size):
end = min(start+args.batch_size, len(image_f))
img_batch = np.zeros((args.batch_size, 3, 565, 565), 'float32')
for i in range(start, end):
img = cv2.imread(osp.join(prefix, image_f[i]['file_name']))
img_batch[i-start] = scaleandtranspose(img, 565)
results = model.forward(img_batch, args.order)
# Calculate the precision and recall
for i in range(start, end):
# Calculate precision
if image_f[i]['id'] in salient_grt_map:
ref = salient_grt_map[image_f[i]['id']]
                prec = len(set(ref) & set(results[0][i-start])) / float(len(ref))
prec_set.append(prec)
print "Precision: %0.2f"%(sum(prec_set)/len(prec_set))
# Form results
            result.append({'id': image_f[i]['id'], 'text': results[0][i-start], 'prob': results[1][i-start]})
        feats[start:end] = results[2][:end-start, :, ::2].reshape((end-start, -1))
"""
img_fig = plt.figure()
plt.imshow(BGR2RGB(origin_img[i]))
plt.axis('off')
fig = plt.figure(figsize=(10, 6), facecolor='white')
for j in range(12):
img = (batch_img[i].transpose((1,2,0)) + mean)/255
ax = fig.add_subplot(3, 4, j+1)
#ax.set_axis_bgcolor('white')
ax.imshow(BGR2RGB(img))
alpha_img = skimage.transform.resize(feat_probs[i,j], [img.shape[0], img.shape[1]])
ax.imshow(alpha_img, cmap=cm.Greys_r, alpha=0.8)
ax.set_title(visual_concepts[i][j])
ax.axis('off')
plt.show()
raw_input("Press Enter to continue...")
"""
if start % 100 == 0 and start > 0:
print "Finished %d images"%start
h5_f.close()
# Dump it into the visual concept files for next step
with open(osp.join(basedir,'visual_concept_%s.json'%args.split), 'w') as f:
pickle.dump(result, f)
| bsd-3-clause |
williampma/opencog | opencog/python/spatiotemporal/temporal_events/relation_formulas.py | 33 | 19534 | from math import fabs, sqrt, floor
from numpy import convolve, NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
import numpy
from scipy.stats.distributions import uniform_gen
from spatiotemporal.temporal_events.util import calculate_bounds_of_probability_distribution
from spatiotemporal.temporal_interval_handling import calculateCenterMass
from spatiotemporal.time_intervals import TimeInterval
from utility.functions import FunctionPiecewiseLinear, FunctionHorizontalLinear, integral, FUNCTION_ZERO, almost_equals
DECOMPOSITION_PRECISION = 10 ** 14
__author__ = 'keyvan'
TEMPORAL_RELATIONS = {
'p': 'precedes',
'm': 'meets',
'o': 'overlaps',
'F': 'finished by',
'D': 'contains',
's': 'starts',
'e': 'equals',
'S': 'started by',
'd': 'during',
'f': 'finishes',
'O': 'overlapped by',
'M': 'met by',
'P': 'preceded by'
}
class TemporalRelation(dict):
all_relations = 'pmoFDseSdfOMP'
_type = None
_list = None
_vector = None
@staticmethod
def from_list(list_object):
relation = TemporalRelation()
for i, name in enumerate(TemporalRelation.all_relations):
value = list_object[i]
if not isinstance(value, (int, float)):
value = float(value)
relation[name] = value
return relation
def to_list(self):
if self._list is None:
self._list = []
for name in self.all_relations:
self._list.append(self[name])
return self._list
def to_vector(self):
if self._vector is None:
_list = self.to_list()
self._vector = numpy.array(_list)
return self._vector
@property
def type(self):
if self._type is None:
self._type = ''.join([name for name in TemporalRelation.all_relations if self[name] > 0])
return self._type
def __setitem__(self, relation_name, value):
if relation_name not in TemporalRelation.all_relations:
            raise AttributeError("'{0}' is not a valid Allen relation".format(relation_name))
dict.__setitem__(self, relation_name, floor(value * DECOMPOSITION_PRECISION) / DECOMPOSITION_PRECISION)
def __repr__(self):
return 'TemporalRelation({0})'.format(self.type)
def __str__(self):
return repr(self)
def __hash__(self):
return hash(tuple(self.to_list()))
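# Illustrative sketch (not part of the original module): building a
# TemporalRelation from a 13-element degree vector, one entry per Allen
# relation in the order 'pmoFDseSdfOMP'. The degree values below are made up.
def _example_temporal_relation():
    degrees = [0.0] * 13
    degrees[0] = 0.7   # 'p' (precedes)
    degrees[2] = 0.3   # 'o' (overlaps)
    relation = TemporalRelation.from_list(degrees)
    return relation.type, relation.to_vector()   # ('po', array of 13 degrees)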
class BaseRelationFormula(object):
def __init__(self):
self.bounds = {}
def duration_of(self, dist):
a, b = self.bounds_of(dist)
return fabs(a - b)
def bounds_of(self, dist):
# if dist in self.bounds:
# return self.bounds[dist]
bounds = calculate_bounds_of_probability_distribution(dist)
self.bounds[dist] = bounds
return bounds
def before_point(self, point_1_value, point_2_value):
return 0
def same_point(self, point_1_value, point_2_value):
return 1 - fabs(self.before_point(point_1_value,
point_2_value) - self.after_point(point_1_value, point_2_value))
def after_point(self, point_1_value, point_2_value):
return self.before_point(point_2_value, point_1_value)
def before_integral_bounds(self, dist_1, dist_2):
return calculate_bounds_of_probability_distribution(dist_1)
def same_integral_bounds(self, dist_1, dist_2):
dist_1_a, dist_1_b = calculate_bounds_of_probability_distribution(dist_1)
dist_2_a, dist_2_b = calculate_bounds_of_probability_distribution(dist_2)
return max(dist_1_a, dist_2_a), min(dist_1_b, dist_2_b)
def after_integral_bounds(self, dist_1, dist_2):
return calculate_bounds_of_probability_distribution(dist_2)
def before(self, dist_1, dist_2):
return integral(lambda x: self.before_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.before_integral_bounds(dist_1, dist_2))
def same(self, dist_1, dist_2):
return integral(lambda x: self.same_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.same_integral_bounds(dist_1, dist_2))
def after(self, dist_1, dist_2):
return integral(lambda x: self.after_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.after_integral_bounds(dist_1, dist_2))
def compare(self, dist_1, dist_2):
"""
returns before, same and after
"""
return self.before(dist_1, dist_2), self.same(dist_1, dist_2), self.after(dist_1, dist_2)
class FormulaCreator(object):
def __init__(self, relation_formula):
self.relation_formula = relation_formula
def temporal_relations_between(self, temporal_event_1, temporal_event_2):
dist_1_beginning, dist_1_ending = temporal_event_1.distribution_beginning, temporal_event_1.distribution_ending
dist_2_beginning, dist_2_ending = temporal_event_2.distribution_beginning, temporal_event_2.distribution_ending
self.relation_formula.bounds[dist_1_beginning] = temporal_event_1.a, temporal_event_1.beginning
self.relation_formula.bounds[dist_1_ending] = temporal_event_1.ending, temporal_event_1.b
self.relation_formula.bounds[dist_2_beginning] = temporal_event_2.a, temporal_event_2.beginning
self.relation_formula.bounds[dist_2_ending] = temporal_event_2.ending, temporal_event_2.b
combinations = [
(dist_1_beginning, dist_2_beginning),
(dist_1_beginning, dist_2_ending),
(dist_1_ending, dist_2_beginning),
(dist_1_ending, dist_2_ending)
]
return self.calculate_relations(combinations)
def calculate_relations(self, combinations=None):
"""
Calculates the values of the 13 relations based on the before, same,
and after values of the combinations between the beginning and
ending distributions of the two intervals obtained, e.g. from
the DecompositionFitter.
:param combinations: the 4 combinations between beginning and ending
distribution
:return: a dictionary containing the 13 relations as keys and their
degrees as values
"""
if combinations is None:
combinations = self.relation_formula.combinations
dist_1_beginning, dist_2_beginning = combinations[0]
dist_1_ending, dist_2_ending = combinations[3]
before = {}
same = {}
after = {}
# iterates over the 4 combinations between beginning and ending
for key in combinations:
before[key], same[key], after[key] = self.relation_formula.compare(*key)
result = TemporalRelation()
result['p'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
before[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['m'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
same[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['o'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['F'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['D'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['s'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['e'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['S'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['d'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['f'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['O'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['M'] = after[dist_1_beginning, dist_2_beginning] * same[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['P'] = after[dist_1_beginning, dist_2_beginning] * after[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
return result
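# Worked example (a sketch, not part of the original module) of the pattern
# used above: every Allen degree is a product of four pairwise comparisons,
# one per (beginning, ending) combination. For instance, for 'o' (overlaps):
#   o = before(B1, B2) * before(B1, E2) * after(E1, B2) * before(E1, E2)
# where B1/E1 and B2/E2 are the beginning/ending distributions of the events.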
class RelationFormulaConvolution(BaseRelationFormula):
def function_convolution_uniform(self, bounds_1, bounds_2, probability=None):
a1, b1 = bounds_1
a2, b2 = bounds_2
length_1 = fabs(a1 - b1)
length_2 = fabs(a2 - b2)
convolution_bounds_a, convolution_bounds_b = a1 - b2, b1 - a2
trapezium_0, trapezium_1 = convolution_bounds_a, convolution_bounds_a + min(length_2, length_1)
trapezium_2, trapezium_3 = trapezium_1 + fabs(length_1 - length_2), convolution_bounds_b
#assert trapezium_2 + min(length_2, length_1) == trapezium_3
if probability is None:
probability = min(1 / length_1, 1 / length_2)
result = FunctionPiecewiseLinear(
{trapezium_0: 0, trapezium_1: probability, trapezium_2: probability, trapezium_3: 0},
FUNCTION_ZERO)
result.is_normalised = True
return result
def function_convolution(self, dist_1, dist_2, bins=50):
a_1, b_1, a_2, b_2 = 0, 0, 0, 0
if dist_1 in self.bounds:
a_1, b_1 = self.bounds[dist_1]
else:
a_1, b_1 = calculate_bounds_of_probability_distribution(dist_1)
self.bounds[dist_1] = a_1, b_1
if dist_2 in self.bounds:
a_2, b_2 = self.bounds[dist_2]
else:
a_2, b_2 = calculate_bounds_of_probability_distribution(dist_2)
self.bounds[dist_2] = a_2, b_2
if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
return self.function_convolution_uniform((a_1, b_1), (a_2, b_2))
convolution_bounds_a, convolution_bounds_b = min(a_1, a_2), max(b_1, b_2)
delta = fabs(convolution_bounds_a - convolution_bounds_b) / bins
convolution_interval = TimeInterval(convolution_bounds_a, convolution_bounds_b, bins)
x = [dist_1.pdf(t) for t in convolution_interval]
y = [dist_2.pdf(t) for t in reversed(convolution_interval)]
c = convolve(x, y)
dictionary_convolution = {}
for t in xrange(len(c)):
dictionary_convolution[delta * t] = c[t]
bias = calculateCenterMass(dictionary_convolution)[0] + dist_2.mean() - dist_1.mean()
dictionary_convolution_biased = {}
for t in dictionary_convolution:
dictionary_convolution_biased[t - bias] = dictionary_convolution[t]
convolution_function = FunctionPiecewiseLinear(dictionary_convolution_biased, FunctionHorizontalLinear(0))
return convolution_function.normalised()
def calculate_similarity(self, dist_1, dist_2):
if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
length_dist_1 = self.duration_of(dist_1)
length_dist_2 = self.duration_of(dist_2)
return min(length_dist_1, length_dist_2) / sqrt(length_dist_1 * length_dist_2)
dist_1_mean, dist_2_mean = dist_1.mean(), dist_2.mean()
dist_1_transformed = lambda t: dist_1.pdf(t + dist_1_mean)
dist_2_transformed = lambda t: dist_2.pdf(t + dist_2_mean)
geometric_mean = lambda t: sqrt(dist_1_transformed(t) * dist_2_transformed(t))
return integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
def compare(self, dist_1, dist_2):
convolution = self.function_convolution(dist_1, dist_2)
before = integral(convolution, NEGATIVE_INFINITY, 0)
after = integral(convolution, 0, POSITIVE_INFINITY)
similarity = self.calculate_similarity(dist_1, dist_2)
correlation = 1 - fabs(before - after)
same = similarity * correlation
return before, same, after
class RelationFormulaGeometricMean(BaseRelationFormula):
def compare(self, dist_1, dist_2):
dist_1_interval = TimeInterval(*self.bounds_of(dist_1))
dist_2_interval = TimeInterval(*self.bounds_of(dist_2))
dictionary_input_output = {}
for time_step in dist_1_interval + dist_2_interval:
dictionary_input_output[time_step] = sqrt(dist_1.pdf(time_step) * dist_2.pdf(time_step))
geometric_mean = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
same = integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
dist_1_mean, dist_1_skewness, dist_1_kurtosis = dist_1.stats(moments='msk')
dist_1_standard_deviation = dist_1.std()
dist_2_mean, dist_2_skewness, dist_2_kurtosis = dist_2.stats(moments='msk')
dist_2_standard_deviation = dist_2.std()
distance = fabs(dist_1_standard_deviation - dist_2_standard_deviation) + fabs(dist_1_skewness - dist_2_skewness)
distance += fabs(dist_1_kurtosis - dist_2_kurtosis)
delta = dist_1_mean - dist_2_mean
non_same_portion = 1.0 - same
portion_after, portion_before = 1.0, 0.0
if almost_equals(distance, 0):
if delta < 0:
portion_after, portion_before = 0.0, 1.0
else:
dist_1_standardized_pdf = lambda x: dist_1.pdf(dist_1_standard_deviation * x + dist_1_mean)
dist_2_standardized_pdf = lambda x: dist_2.pdf(dist_2_standard_deviation * x + dist_2_mean)
geometric_mean = lambda t: sqrt(dist_1_standardized_pdf(t) * dist_2_standardized_pdf(t))
geometric_mean_scaled = lambda p: geometric_mean(p / distance)
geometric_mean_scaled_length = max(self.duration_of(dist_1), self.duration_of(dist_2))
dictionary_input_output = {}
for time_step in TimeInterval(-geometric_mean_scaled_length / 2.0, geometric_mean_scaled_length / 2.0):
dictionary_input_output[time_step] = geometric_mean_scaled(time_step)
geometric_mean_scaled = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
portion_after = integral(geometric_mean_scaled, NEGATIVE_INFINITY, delta)
portion_before = integral(geometric_mean_scaled, delta, POSITIVE_INFINITY)
after = portion_after / (portion_after + portion_before) * non_same_portion
return 1.0 - same - after, same, after
if __name__ == '__main__':
import matplotlib.pyplot as plt
from scipy.stats import norm, uniform, expon
from spatiotemporal.temporal_events import TemporalEvent, TemporalEventPiecewiseLinear
import matplotlib.pyplot as plt
figure_number = 1
for event_1, event_2 in [
(
TemporalEvent(uniform(loc=3, scale=2), uniform(loc=7, scale=9)),
TemporalEvent(uniform(loc=0, scale=10), uniform(loc=13, scale=2))
),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=3, scale=2)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=6, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=8, scale=5), uniform(loc=15, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=7), uniform(loc=8, scale=7)),
# TemporalEvent(uniform(loc=4, scale=1), uniform(loc=11, scale=2)),
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=0, scale=11), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=8), uniform(loc=6, scale=8)),
# TemporalEvent(uniform(loc=0, scale=22), uniform(loc=13, scale=8))
# ),
#
# (
# TemporalEvent(uniform(loc=2, scale=2), uniform(loc=7, scale=2)),
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=4, scale=2)),
# TemporalEvent(uniform(loc=6, scale=2), uniform(loc=9, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=3), uniform(loc=15, scale=2)),
# TemporalEvent(uniform(loc=5, scale=2), uniform(loc=9, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=5, scale=3), uniform(loc=9, scale=2)),
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=15, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2)),
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2)),
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2))
# ),
#
# (
# TemporalEvent(norm(loc=1, scale=4.5), expon(loc=30, scale=2)),
# TemporalEvent(norm(loc=25, scale=4.5), expon(loc=60, scale=2))
# ),
#
# (
# TemporalEvent(expon(loc=1, scale=4.5), norm(loc=30, scale=2)),
# TemporalEvent(expon(loc=25, scale=4.5), norm(loc=60, scale=2))
# ),
#
# (
# TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {6: 1, 7: 0.9, 8: 0.6, 9: 0.1, 10: 0}),
# TemporalEventPiecewiseLinear({7.5: 0, 8.5: 0.1, 9.5: 0.3, 10.5: 0.7, 11.5: 1},
# {13: 1, 14.5: 0.9, 15.3: 0.6, 17: 0.1, 20: 0})
# ),
]:
temporal_relations = event_1 * event_2
        print '\nFigure ' + str(figure_number)
print '----------------------'
print sum(temporal_relations.values())
for p in 'pmoFDseSdfOMP':
print p, temporal_relations[p]
figure_number += 1
event_1.plot(show_distributions=True).ylim(ymin=-0.1, ymax=1.1)
event_2.plot(show_distributions=True).figure()
plt.show()
| agpl-3.0 |
jchodera/MSMs | jchodera/abl-11400/pyemma/cluster.py | 1 | 3309 | #!/usr/bin/env python
import pyemma
import numpy as np
import mdtraj
import time
import os
# Source directory
source_directory = '/cbio/jclab/projects/fah/fah-data/munged3/no-solvent/11400' # Abl ensembler
################################################################################
# Load reference topology
################################################################################
print ('loading reference topology...')
reference_pdb_filename = 'protein.pdb'
reference_trajectory = os.path.join(source_directory, 'run0-clone0.h5')
traj = mdtraj.load(reference_trajectory)
traj[0].save_pdb(reference_pdb_filename)
################################################################################
# Initialize featurizer
################################################################################
print('Initializing featurizer...')
import pyemma.coordinates
featurizer = pyemma.coordinates.featurizer(reference_pdb_filename)
#featurizer.add_all() # all atoms
featurizer.add_selection( featurizer.select_Backbone() )
print('Featurizer has %d features.' % featurizer.dimension())
################################################################################
# Define coordinates source
################################################################################
nskip = 40 # number of initial frames to skip
import pyemma.coordinates
from glob import glob
trajectory_filenames = glob(os.path.join(source_directory, 'run*-clone*.h5'))
coordinates_source = pyemma.coordinates.source(trajectory_filenames, features=featurizer)
print("There are %d frames total in %d trajectories." % (coordinates_source.n_frames_total(), coordinates_source.number_of_trajectories()))
################################################################################
# Cluster
################################################################################
print('Clustering...')
generator_ratio = 250
nframes = coordinates_source.n_frames_total()
nstates = int(nframes / generator_ratio)
stride = 1
metric = 'minRMSD'
initial_time = time.time()
clustering = pyemma.coordinates.cluster_uniform_time(data=coordinates_source, k=nstates, stride=stride, metric=metric)
#clustering = pyemma.coordinates.cluster_kmeans(data=coordinates_source, k=nstates, stride=stride, metric=metric, max_iter=10)
#clustering = pyemma.coordinates.cluster_mini_batch_kmeans(data=coordinates_source, batch_size=0.1, k=nstates, stride=stride, metric=metric, max_iter=10)
final_time = time.time()
elapsed_time = final_time - initial_time
print('Elapsed time %.3f s' % elapsed_time)
# Save cluster centers
np.save('clustercenters', clustering.clustercenters)
# Save discrete trajectories.
dtrajs = clustering.dtrajs
dtrajs_dir = 'dtrajs'
clustering.save_dtrajs(output_dir=dtrajs_dir, output_format='npy', extension='.npy')
################################################################################
# Make timescale plots
################################################################################
import matplotlib as mpl
mpl.use('Agg') # Don't use display
import matplotlib.pyplot as plt
from pyemma import msm
from pyemma import plots
lags = [1,2,5,10,20,50]
#its = msm.its(dtrajs, lags=lags, errors='bayes')
its = msm.its(dtrajs, lags=lags)
plots.plot_implied_timescales(its)
plt.savefig('plot.pdf')
| gpl-2.0 |
themrmax/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
jakevdp/scipy | scipy/interpolate/tests/test_rbf.py | 14 | 4604 | # Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
    # subtract the linear trend and make sure there are no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension doesn't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
fengzhe29888/gnuradio-old | gr-utils/python/utils/plot_fft_base.py | 53 | 10449 | #!/usr/bin/env python
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_fft_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = getattr(scipy, datatype)
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.94, ("File: %s" % filename), weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % (self.position))
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.iq_fft = self.dofft(self.iq)
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.freq = self.calc_freq(self.time, self.sample_rate)
def dofft(self, iq):
N = len(iq)
iq_fft = scipy.fftpack.fftshift(scipy.fft(iq)) # fft and shift axis
iq_fft = 20*scipy.log10(abs((iq_fft+1e-15)/N)) # convert to decibels, adjust power
# adding 1e-15 (-300 dB) to protect against value errors if an item in iq_fft is 0
return iq_fft
def calc_freq(self, time, sample_rate):
N = len(time)
Fs = 1.0 / (time.max() - time.min())
Fn = 0.5 * sample_rate
freq = scipy.array([-Fn + i*Fs for i in xrange(N)])
return freq
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=[0.075, 0.2, 0.4, 0.6])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for FFT plot
self.sp_fft = self.fig.add_subplot(2,2,2, position=[0.575, 0.2, 0.4, 0.6])
self.sp_fft.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
self.sp_fft.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_fft.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time() # draw the plot
self.plot_fft = self.sp_fft.plot([], 'bo-') # make plot for FFT
self.draw_fft() # draw the plot
draw()
def draw_time(self):
reals = self.iq.real
imags = self.iq.imag
self.plot_iq[0].set_data([self.time, reals])
self.plot_iq[1].set_data([self.time, imags])
self.sp_iq.set_xlim(self.time.min(), self.time.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_fft(self):
self.plot_fft[0].set_data([self.freq, self.iq_fft])
self.sp_fft.set_xlim(self.freq.min(), self.freq.max())
self.sp_fft.set_ylim([self.iq_fft.min()-10, self.iq_fft.max()+10])
def update_plots(self):
self.draw_time()
self.draw_fft()
self.xlim = self.sp_iq.get_xlim()
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
self.xlim = newxlim
#xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0]))))
xmax = min(int(ceil(self.sample_rate*(self.xlim[1]))), len(self.iq))
iq = self.iq[xmin : xmax]
time = self.time[xmin : xmax]
iq_fft = self.dofft(iq)
freq = self.calc_freq(time, self.sample_rate)
self.plot_fft[0].set_data(freq, iq_fft)
self.sp_fft.axis([freq.min(), freq.max(),
iq_fft.min()-10, iq_fft.max()+10])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time as well as the frequency domain (FFT) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. This value defaults to 1000. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
                      help="Set the sample rate of the data [default=%default]")
return parser
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_fft_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_fft_base(options.data_type, filename, options)
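# Illustrative usage sketch (not part of the original script); the capture file
# name below is hypothetical:
#   ./plot_fft_base.py -d complex64 -B 4096 -s 0 -R 250e3 capture.dat
# This plots 4096 complex samples per page and labels the axes assuming a
# 250 kHz capture sample rate.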
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
shusenl/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
sbyrnes321/multilayer_surface_plasmon | multilayer_surface_plasmon.py | 1 | 53001 | # -*- coding: utf-8 -*-
"""
Calculates surface-plasmon-polariton modes in multilayer planar structures.
For more details see: http://pythonhosted.org/multilayer_surface_plasmon/
"""
#Copyright (C) 2013 Steven Byrnes
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division, print_function
import numpy as np
import math, cmath
import scipy.optimize
import scipy.integrate
import matplotlib.pyplot as plt
inf = float('inf')
from math import pi
from copy import deepcopy
import numericalunits as nu
# numericalunits is a package for units and constants,
# See https://pypi.python.org/pypi/numericalunits
# How it works in three sentences:
# (1) 4 * nu.cm means "4 cm".
# (2) my_length / nu.um means "my_length expressed in microns"
# (3) If any output randomly varies between python sessions, it means you made
# a dimensional-analysis error.
def floats_are_equal(a, b, tol=1e-5):
"""
Checks whether a and b (real or complex) are equal, to within tol relative
error.
"""
return abs(a - b) <= tol * (abs(a) + abs(b))
def assert_floats_are_equal(a, b, tol=1e-5):
"""
If a and b are not equal (within tol relative error), then raise an
assertion error that displays both a and b. If they are equal, do nothing.
"""
assert floats_are_equal(a, b, tol), (a,b)
def find_all_zeros(min_re, max_re, min_im, max_im, fn,
grid_points, iterations, reduction_factor,
plot_full_region, show_progress):
"""
fn is a complex function of a complex parameter fn(z). This function tries
to find all its zeros. Looks in the search space given by
min_re <= Re(z) <= max_re and min_im <= Im(z) <= max_im. (But it may also
return some minima slightly outside that search space.)
show_progress=True prints algorithm status messages.
plot_full_region=True displays two color diagrams of the full region
[min_re, max_re] x [min_im, max_im]. The first is a log-plot of |f(z)|. The
second uses complex analysis to plot a quantity that often makes the zeros
of f(z) stand out a little better. (The details don't matter, it's just a
nice visual.)
The algorithm is very simple: We're looking in a rectangular
region in the complex plane. We evaluate fn(z) at a grid of 20 x 20 points
within that region (replace "20" with "grid_points"). Each point with
|fn(z)| smaller than its eight neighbors is a candidate local minimum,
so we draw a smaller box around it, reduced in each dimension by
reduction_factor. Repeat this process a number of times given by the
iterations parameter. (In each step, the number of boxes under
investigation can increase or decrease based on how many candidate minima
were discovered in the previous step.)
The final accuracy in Re(z) is something like
(max_re - min_re) / (grid_points * reduction_factor**(iterations-1))
Analogously for Im(z).
Returns a list of complex answers: [z0, z1, ...]. Some may be spurious, so
check each before use.
The code itself is totally generic, but graph captions etc assume that the
fn(z) is really fn(kx), the complex in-plane wavenumber, and it uses
units of radians per micron
"""
# Check arguments
assert reduction_factor > 1 and max_re > min_re and max_im > min_im
assert (max_re.imag == 0 and min_re.imag == 0
and max_im.imag == 0 and min_im.imag == 0)
# Edge-point rejection (see below) relies on the following assumption:
assert grid_points > 2 * reduction_factor
if plot_full_region:
def inverse_fn(z):
""" 1 / fn(z) """
f = fn(z)
return inf if f == 0 else 1/f
def contour_int(z, d_re, d_im):
"""
Approximate the contour integral of inverse_fn around a point z,
using a rectangle of half-width d_re (in real direction) and
half-height d_im. Just a nice plot that makes zeros stand out.
"""
assert d_re.imag == 0 and d_im.imag == 0 and d_re > 0 and d_im > 0
below = inverse_fn(z - 1j * d_im)
above = inverse_fn(z + 1j * d_im)
left = inverse_fn(z - d_re)
right = inverse_fn(z + d_re)
return (below * (2 * d_re) + right * (2j * d_im)
+ above * (-2 * d_re) + left * (-2j * d_im))
res, re_step = np.linspace(min_re, max_re, num=100, retstep=True)
ims, im_step = np.linspace(min_im, max_im, num=100, retstep=True)
fig = plt.figure()
direct_plot = fig.add_subplot(111)
data = [[math.log10(abs(fn(re + 1j * im))) for re in res] for im in ims]
direct_plot.imshow(data, extent=(min_re * nu.um, max_re * nu.um,
min_im * nu.um, max_im * nu.um),
origin='lower')
direct_plot.set_xlabel('Re(kx) [rad/um]')
direct_plot.set_ylabel('Im(kx) [rad/um]')
direct_plot.set_title('log(|fn(z)|) -- Looking for minima (blue)')
fig = plt.figure()
contour_plot = fig.add_subplot(111)
data = [[-math.log10(abs(contour_int(re + 1j * im, re_step, im_step)))
for re in res] for im in ims]
contour_plot.imshow(data, extent=(min_re * nu.um, max_re * nu.um,
min_im * nu.um, max_im * nu.um),
origin='lower')
contour_plot.set_xlabel('Re(kx) [rad/um]')
contour_plot.set_ylabel('Im(kx) [rad/um]')
contour_plot.set_title(
'-log(|contour integral of 1/fn(z) around a little rectangle|)\n'
+ ' -- This plot highlights zeros in fn(z), but also lines of\n'
+ 'discontinuity (where top or bottom kz is pure-imaginary)')
# "regions" is a list where each entry has the form
# [min_re, max_re, min_im, max_im]. Each entry describes a region in which we
# are seeking local minima.
regions = [[min_re, max_re, min_im, max_im]]
region_width_re = max_re - min_re
region_width_im = max_im - min_im
for iteration_number in range(iterations):
# all_local_mins will be a list of (x, y) for every local minimum in
# every region. This is used to generate the next iteration.
all_local_mins = []
for region_index in range(len(regions)):
min_re_now, max_re_now, min_im_now, max_im_now = regions[region_index]
results_grid = []
re_list, re_step = np.linspace(min_re_now, max_re_now, num=grid_points, retstep=True)
im_list, im_step = np.linspace(min_im_now, max_im_now, num=grid_points, retstep=True)
fn_to_minimize = lambda z : abs(fn(z))
results_grid = [[fn_to_minimize(re + 1j * im) for im in im_list]
for re in re_list]
results_grid = np.array(results_grid)
# local_mins will be a list of (i,j) where (re_list[i], im_list[j])
# is a local minimum on the results_grid
local_mins = []
for i in range(grid_points):
for j in range(grid_points):
is_min = all(results_grid[i2, j2] >= results_grid[i,j]
for i2 in [i-1, i, i+1]
for j2 in [j-1, j, j+1]
if (0 <= i2 < grid_points
and 0 <= j2 < grid_points))
if is_min:
local_mins.append((i,j))
            # local_mins_OK is the subset of local_mins that passes the
            # edge-rejection test. The edge-rejection test says that from the
            # third iteration onwards, any point at the edge of a region is
            # probably not a true minimum.
local_mins_OK = []
for (i,j) in local_mins:
z_now = re_list[i] + 1j * im_list[j]
if iteration_number >= 2 and (i == 0 or j == 0 or
i == grid_points-1 or j == grid_points-1):
# Rejecting an edge point...
if show_progress:
print('----')
print('Deleting edge point: region #'
+ str(region_index+1) + ' (i,j)=', (i,j),
' kx in rad/um=',
z_now / nu.um**-1,
' fn(z)=', fn(z_now))
else:
local_mins_OK.append((i,j))
# Add local_mins_OK entries into all_local_mins
for (i,j) in local_mins_OK:
all_local_mins.append(re_list[i] + 1j * im_list[j])
if show_progress:
print('----')
print('iter #' + str(iteration_number)
+ ' , region #' + str(region_index+1) + ' of ' + str(len(regions))
+ ' , ' + str(len(local_mins_OK)) + ' minima')
if len(local_mins_OK) > 0:
print('For each, here is ((i, j), kx in rad/um, fn(kx)):')
print([((i, j), (re_list[i] + 1j * im_list[j]) / nu.um**-1,
fn(re_list[i] + 1j * im_list[j]))
for (i,j) in local_mins_OK])
# Now we've gone through every region.
# Delete redundant minima that showed up in overlapping regions.
all_local_mins_norepeat = []
def is_repeat(z1, z2):
return ((abs((z1 - z2).real) <= 0.5 * re_step) and
(abs((z1 - z2).imag) <= 0.5 * im_step))
for z_now in all_local_mins:
if not any(is_repeat(z_now, z) for z in all_local_mins_norepeat):
all_local_mins_norepeat.append(z_now)
if show_progress:
num_deleted = len(all_local_mins) - len(all_local_mins_norepeat)
if num_deleted > 0:
print('----')
print('After iter #' + str(iteration_number)
+ ', deleted ' + str(num_deleted) + ' redundant point(s)')
all_local_mins = all_local_mins_norepeat
if show_progress:
print('----')
print('** After iter #' + str(iteration_number) + ', we have '
+ str(len(all_local_mins)) + ' candidate minima')
region_width_re /= reduction_factor
region_width_im /= reduction_factor
regions = [[z.real - region_width_re / 2, z.real + region_width_re / 2,
z.imag - region_width_im / 2, z.imag + region_width_im / 2]
for z in all_local_mins]
# Done with main algorithm. Show the discovered minima on the plots as
# white X's. Note: Zeros outside the plot region will not be seen here,
# but the function still returns them.
if plot_full_region:
# Keep the image filling the plot area
direct_plot.autoscale(False)
contour_plot.autoscale(False)
for z in all_local_mins:
direct_plot.plot(z.real * nu.um, z.imag * nu.um, 'wx')
contour_plot.plot(z.real * nu.um, z.imag * nu.um, 'wx')
return all_local_mins
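# Illustrative sketch (not part of the original module): find_all_zeros applied
# to a simple analytic function whose zeros are known in advance. The search
# window and iteration settings below are made up for illustration.
def _demo_find_all_zeros():
    """Example only: the zeros of f(z) = z**2 + (1/um)**2 lie at z = +/- 1j/um."""
    f = lambda z: z**2 + (1 / nu.um)**2
    return find_all_zeros(-2 / nu.um, 2 / nu.um, -2 / nu.um, 2 / nu.um, f,
                          grid_points=20, iterations=5, reduction_factor=5,
                          plot_full_region=False, show_progress=False)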
def find_kzs(params):
"""
"params" is a dictionary containing w (angular frequency), kx (angular
wavenumber), ex_list (unitless permittivity of each layer in x-direction),
ez_list (ditto in z direction), mu_list (unitless permeability in
y-direction).
This function returns a new dictionary containing all those data PLUS
kz_list, a list of kz in each layer.
"""
w = params['w'] # angular frequency (w looks like omega)
kx = params['kx']
ex_list = params['ex_list']
ez_list = params['ez_list']
mu_list = params['mu_list']
N = len(ez_list)
assert N == len(ex_list) == len(ez_list) == len(mu_list) >= 2
assert w > 0
for list_name in ['ex_list', 'ez_list', 'mu_list']:
for i in range(N):
assert params[list_name][i].imag >= 0
kz_list = [cmath.sqrt(w**2 * ex_list[i] * mu_list[i] / nu.c0**2
- kx**2 * ex_list[i] / ez_list[i]) for i in range(N)]
# Imaginary parts should be nonnegative
kz_list = [(-kz if kz.imag < 0 else kz) for kz in kz_list]
new_params = deepcopy(params)
new_params['kz_list'] = kz_list
return new_params
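# Sanity-check sketch (not part of the original module): in an isotropic,
# lossless layer (ex == ez, both real) the expression above should reduce to
# the familiar kz = sqrt(eps * mu * (w/c)**2 - kx**2). The numbers are made up.
def _check_isotropic_kz():
    w = 2 * pi * nu.c0 / (1.5 * nu.um)      # 1.5 um vacuum wavelength
    kx = 1 / nu.um
    eps, mu = 2.25 + 0j, 1 + 0j             # glass-like, non-magnetic
    params = {'w': w, 'kx': kx, 'd_list': [inf, inf],
              'ex_list': [eps, eps], 'ez_list': [eps, eps],
              'mu_list': [mu, mu]}
    kz = find_kzs(params)['kz_list'][0]
    assert_floats_are_equal(kz, cmath.sqrt(eps * mu * (w / nu.c0)**2 - kx**2))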
def bc_matrix(params):
"""
Calculate the "boundary condition matrix". This is a matrix M such that
M * [[H0down],[H1up],[H1down],...] = [[0],[0],...]
IF the boundary conditions are all satisfied. (See online docs for
definitions and what's going on.)
params should contain ex_list, ez_list, kx, kz_list, d_list (thickness of
each layer, first and last should be inf.)
"""
w = params['w']
kx = params['kx']
d_list = params['d_list']
ex_list = params['ex_list']
ez_list = params['ez_list']
kz_list = params['kz_list']
N = len(d_list)
assert N == len(d_list) == len(ex_list) == len(ez_list) == len(kz_list)
assert N >= 2
assert d_list[0] == d_list[-1] == inf
# delta = e^{i * kz * d}, i.e. phase change across each layer
# delta[0] and delta[-1] are undefined and are not used.
delta_list = [cmath.exp(1j * kz_list[i] * d_list[i]) for i in range(N)]
Ex_up_over_H_up_list = [kz_list[i] / (w * ex_list[i] * nu.eps0)
for i in range(N)]
Ex_down_over_H_down_list = [-a for a in Ex_up_over_H_up_list]
Ez_up_over_H_up_list = [-kx / (w * ez_list[i] * nu.eps0) for i in range(N)]
Ez_down_over_H_down_list = Ez_up_over_H_up_list[:]
mat = np.zeros((2*N-2, 2*N-2), dtype=complex)
for row_now in range(N-1):
# This row concerns continuity of Ex across the boundary between
# layer_under and layer_over (under and over the boundary respectively)
layer_under = row_now
layer_over = layer_under + 1
# up_under_index is the column index in mat that gets multiplied by
# H_{up} in layer_under.
up_under_index = 2 * layer_under - 1
down_under_index = 2 * layer_under
up_over_index = 2 * layer_over - 1
down_over_index = 2 * layer_over
if layer_under != 0:
assert 0 <= up_under_index < 2*N-2
mat[row_now, up_under_index] = (
Ex_up_over_H_up_list[layer_under] * delta_list[layer_under])
mat[row_now, down_under_index] = Ex_down_over_H_down_list[layer_under]
mat[row_now, up_over_index] = -Ex_up_over_H_up_list[layer_over]
if layer_over != N-1:
assert 0 <= down_over_index < 2*N-2
mat[row_now, down_over_index] = (
-Ex_down_over_H_down_list[layer_over] * delta_list[layer_over])
for row_now in range(N-1, 2*N-2):
# This row concerns continuity of eps_z * Ez across the boundary between
# layer_under and layer_over (under and over the boundary respectively)
layer_under = row_now - (N-1)
layer_over = layer_under + 1
# up_under_index is the column index in mat that gets multiplied by
# H_{up} in layer_under.
up_under_index = 2 * layer_under - 1
down_under_index = 2 * layer_under
up_over_index = 2 * layer_over - 1
down_over_index = 2 * layer_over
if layer_under != 0:
assert 0 <= up_under_index < 2*N-2
mat[row_now, up_under_index] = (ez_list[layer_under] *
Ez_up_over_H_up_list[layer_under] * delta_list[layer_under])
mat[row_now, down_under_index] = (ez_list[layer_under] *
Ez_down_over_H_down_list[layer_under])
mat[row_now, up_over_index] = (-ez_list[layer_over] *
Ez_up_over_H_up_list[layer_over])
if layer_over != N-1:
assert 0 <= down_over_index < 2*N-2
mat[row_now, down_over_index] = (-ez_list[layer_over] *
Ez_down_over_H_down_list[layer_over] * delta_list[layer_over])
return mat
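# Worked special case (a sketch, not part of the original module): for N == 2
# (a single interface between semi-infinite media) the matrix above is 2x2 and
# acts on [[H0down], [H1up]]. Setting its determinant to zero recovers the
# textbook single-interface surface-plasmon condition
#   kz0 / ex0 + kz1 / ex1 = 0.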
def find_kx(input_params, search_domain=None, show_progress=False,
grid_points=20, iterations=9, reduction_factor=9,
plot_full_region=True):
"""
input_params is a dictionary with the simulation parameters. (ex_list,
d_list, etc.) Returns a list of possible complex kx, sorted from the
lowest-order mode to the highest one discovered.
search_domain is [min Re(kx), max Re(kx), min Im(kx), max Im(kx)] in which
to search for solutions. With default (None), I use some heuristics to
guess a region that is likely to find at least the first mode or two.
The following parameters are passed straight into find_all_zeros():
show_progress, grid_points, iterations, reduction_factor, and
plot_full_region. show_progress=True prints diagnostics during search for
kx minima.
"""
w = input_params['w']
d_list = input_params['d_list']
ex_list = input_params['ex_list']
ez_list = input_params['ez_list']
mu_list = input_params['mu_list']
N = len(mu_list)
assert N == len(d_list) == len(ex_list) == len(ez_list)
# error(z) approaches 0 as kx = z approaches a true plasmon mode.
# It's proportional to the determinant of the boundary-condition matrix,
# which equals zero at modes.
def error(kx):
if kx == 0:
return inf
temp_params = input_params.copy()
temp_params['kx'] = kx
should_be_zero = np.linalg.det(bc_matrix(find_kzs(temp_params)))
return should_be_zero / kx**(N+1)
# "return should_be_zero" is also OK but has an overall slope that
# makes it harder to find zeros; also, there's a false-positive at k=0.
# choose the region in which to search for minima. My heuristic is:
# The upper limit of kx should be large enough that
# 2 * pi * i * kzm * d ~ 20 for the thinnest layer we have, or 3 times
# the light-line, whichever is bigger.
if search_domain is None:
kx_re_max = max(max(abs((20 / (2 * pi * d_list[i]))
* cmath.sqrt(ez_list[i] / ex_list[i])) for i in range(1,N)),
3 * w / nu.c0)
kx_re_min = -kx_re_max
kx_im_min = 0
kx_im_max = abs(kx_re_max)
else:
kx_re_min = search_domain[0]
kx_re_max = search_domain[1]
kx_im_min = search_domain[2]
kx_im_max = search_domain[3]
# Main part of function: Call find_all_zeros()
kx_list = find_all_zeros(kx_re_min, kx_re_max, kx_im_min, kx_im_max, error,
show_progress=show_progress, grid_points=grid_points,
iterations=iterations,
reduction_factor=reduction_factor,
plot_full_region=plot_full_region)
# sort and remove "repeats" with opposite signs
kx_list = sorted(kx_list, key=(lambda kx : abs(kx)))
i=0
while i < len(kx_list) - 1:
if abs(kx_list[i] + kx_list[i+1]) <= 1e-6 * (abs(kx_list[i]) + abs(kx_list[i+1])):
kx_list.pop(i)
else:
i += 1
# Fix amplifying waves
kx_list = [(-kx if (kx.imag < 0 or (kx.imag==0 and kx.real < 0)) else kx)
for kx in kx_list]
return kx_list
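# Illustrative sketch (not part of the original module): for one
# metal/dielectric interface, find_kx should recover the textbook
# surface-plasmon dispersion kx = (w/c) * sqrt(e_m * e_d / (e_m + e_d)).
# The permittivity below is a made-up Drude-like value, not real data.
def _demo_single_interface_spp():
    w = 2 * pi * nu.c0 / (1.0 * nu.um)        # 1 um vacuum wavelength
    e_m, e_d = -20 + 1j, 1 + 0j               # hypothetical metal / air
    params = {'w': w, 'd_list': [inf, inf],
              'ex_list': [e_m, e_d], 'ez_list': [e_m, e_d],
              'mu_list': [1 + 0j, 1 + 0j]}
    kx_analytic = (w / nu.c0) * cmath.sqrt(e_m * e_d / (e_m + e_d))
    kx_numeric_list = find_kx(params, show_progress=False,
                              plot_full_region=False)
    return kx_analytic, kx_numeric_list   # the list should contain kx_analytic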
def find_all_params_from_kx(params):
"""
params is a dictionary containing kx and other simulation parameters like
w, d_list, etc. It is assumed that this kx really is a mode!
This function calculates kz_list, H_up_list, H_down_list, Ex_up_list,
Ex_down_list, Ez_up_list, Ez_down_list.
It returns a new parameter dictionary containing all the old information
plus those newly-calculated parameters.
This is linear optics, so you can scale the E and H up or down by any
constant factor. (And Poynting vector by the square of that factor.)
I chose the normalization that makes the maximum of Ez_up_list equal to
1 V/nm. (This is arbitrary.)
layer_bottom_list[i] is the z-coordinate of the bottom of layer i. Assume
that layer 0 is z<0,
layer 1 is 0 < z < d_list[1],
layer 2 is d_list[1] < z < d_list[1] + d_list[2], etc.
"""
new_params = find_kzs(deepcopy(params))
w = new_params['w']
d_list = new_params['d_list']
kx = new_params['kx']
kz_list = new_params['kz_list']
ex_list = new_params['ex_list']
ez_list = new_params['ez_list']
mu_list = new_params['mu_list']
N = len(mu_list)
mat = bc_matrix(new_params)
eigenvals, eigenvecs = np.linalg.eig(mat)
which_eigenval_is_zero = np.argmin(np.abs(eigenvals))
null_vector = eigenvecs[:,which_eigenval_is_zero]
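    # Debugging aid: flip the "if False" below to True to print the null vector
    # and confirm that it really lies in the null space of the b.c. matrix.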
if False:
print('null vector:')
print(null_vector)
print('matrix entry absolute values:')
print(np.abs(mat))
print('abs(mat . null_vector) should be 0:')
print(np.abs(np.dot(mat, null_vector)))
print('calculated eigenvalue:')
print(eigenvals[which_eigenval_is_zero])
H_up_list = [0]
H_up_list.extend(null_vector[i] for i in range(1, 2*N-2, 2))
H_down_list = [null_vector[i] for i in range(0, 2*N-2, 2)]
H_down_list.append(0)
assert N == len(H_up_list) == len(H_down_list)
Ex_up_list = [H_up_list[i] * kz_list[i] / (w * ex_list[i] * nu.eps0)
for i in range(N)]
Ex_down_list = [-H_down_list[i] * kz_list[i] / (w * ex_list[i] * nu.eps0)
for i in range(N)]
Ez_up_list = [-H_up_list[i] * kx / (w * ez_list[i] * nu.eps0)
for i in range(N)]
Ez_down_list = [-H_down_list[i] * kx / (w * ez_list[i] * nu.eps0)
for i in range(N)]
# normalize E and H.
largest_Ez_up_index = np.argmax(np.abs(np.array(Ez_up_list)))
scale_factor = (1 * nu.V/nu.nm) / Ez_up_list[largest_Ez_up_index]
for X_list in [H_up_list, H_down_list, Ex_up_list, Ex_down_list,
Ez_up_list, Ez_down_list]:
for i in range(N):
X_list[i] *= scale_factor
new_params['H_up_list'] = H_up_list
new_params['H_down_list'] = H_down_list
new_params['Ex_up_list'] = Ex_up_list
new_params['Ex_down_list'] = Ex_down_list
new_params['Ez_up_list'] = Ez_up_list
new_params['Ez_down_list'] = Ez_down_list
# x-component of complex Poynting vector, integrated over a layer
Sx_list = []
for i in range(N):
Ez_up = Ez_up_list[i]
Ez_down = Ez_down_list[i]
H_up_star = H_up_list[i].conjugate()
H_down_star = H_down_list[i].conjugate()
kz = kz_list[i]
d = d_list[i]
Sx = 0
# add each term only if it's nonzero, to avoid 0 * nan in top and
# bottom layers
if Ez_up * H_up_star != 0:
Sx += ((-Ez_up * H_up_star) / (4 * kz.imag)
* (1 - cmath.exp(-2 * kz.imag * d)))
if Ez_down * H_down_star != 0:
Sx += ((-Ez_down * H_down_star) / (4 * kz.imag)
* (1 - cmath.exp(-2 * kz.imag * d)))
if Ez_down * H_up_star != 0:
Sx += ((-Ez_down * H_up_star) / (4j * kz.real)
* (1 - cmath.exp(-2j * kz.real * d))
* cmath.exp(1j * kz * d))
if Ez_up * H_down_star != 0:
Sx += ((-Ez_up * H_down_star) / (4j * kz.real)
* (1 - cmath.exp(-2j * kz.real * d))
* cmath.exp(1j * kz * d))
Sx_list.append(Sx)
new_params['Sx_list'] = Sx_list
# x-component of complex Poynting vector, integrated over all layers
Sx_total = sum(Sx_list)
new_params['Sx_total'] = Sx_total
layer_bottom_list = [-inf, 0]
for i in range(1,N-1):
layer_bottom_list.append(layer_bottom_list[-1] + d_list[i])
new_params['layer_bottom_list'] = layer_bottom_list
return new_params
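# Typical workflow sketch (hypothetical values; compare the tests below): pick
# one of the kx candidates returned by find_kx(), add it to the parameter
# dictionary, then expand it into a full mode description:
#
#     my_params['kx'] = kx_candidates[0]
#     mode = find_all_params_from_kx(my_params)   # adds kz_list, H_up_list, ...
#     print(mode['Sx_total'])                     # integrated complex Poynting flux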
def find_layer(z, params):
"""
    Return the index (0 through N-1) of the layer containing the z-coordinate z.
    Exactly on a layer boundary, either of the two adjacent layers may be returned.
"""
N = len(params['d_list'])
for i in range(N):
if z <= params['layer_bottom_list'][i]:
return i-1
return N-1
def Hy(z, params, x=0, layer=None):
"""
Complex H-field at (x,z). Optional "layer" parameter forces the use of the
formulas for fields in a certain layer, regardless of whether z is actually
in that layer or not.
"""
N = len(params['d_list'])
if layer is None:
layer = find_layer(z, params)
H_up = params['H_up_list'][layer]
H_down = params['H_down_list'][layer]
kz = params['kz_list'][layer]
kx = params['kx']
layer_bottom = params['layer_bottom_list'][layer]
layer_top = inf if layer == N-1 else params['layer_bottom_list'][layer + 1]
if H_up == 0:
# This is to avoid 0 * nan errors for infinitely-thick top or bottom
# layers
up_term = 0
else:
up_term = H_up * cmath.exp(1j * kz * (z - layer_bottom) + 1j * kx * x)
if H_down == 0:
down_term = 0
else:
down_term = H_down * cmath.exp(1j * kz * (layer_top - z) + 1j * kx * x)
return up_term + down_term
def Ex(z, params, x=0, layer=None):
"""
Complex E-field (x-component) at (x,z). See Hy documentation.
"""
N = len(params['d_list'])
if layer is None:
layer = find_layer(z, params)
Ex_up = params['Ex_up_list'][layer]
Ex_down = params['Ex_down_list'][layer]
kz = params['kz_list'][layer]
kx = params['kx']
layer_bottom = params['layer_bottom_list'][layer]
layer_top = inf if layer == N-1 else params['layer_bottom_list'][layer + 1]
if Ex_up == 0:
# This is to avoid 0 * nan for infinitely-thick top or bottom layers
up_term = 0
else:
up_term = Ex_up * cmath.exp(1j * kz * (z - layer_bottom) + 1j * kx * x)
if Ex_down == 0:
down_term = 0
else:
down_term = Ex_down * cmath.exp(1j * kz * (layer_top - z) + 1j * kx * x)
return up_term + down_term
def Ez(z, params, x=0, layer=None):
"""
Complex E-field (z-component) at (x,z). See Hy documentation.
"""
N = len(params['d_list'])
if layer is None:
layer = find_layer(z, params)
Ez_up = params['Ez_up_list'][layer]
Ez_down = params['Ez_down_list'][layer]
kz = params['kz_list'][layer]
kx = params['kx']
layer_bottom = params['layer_bottom_list'][layer]
layer_top = inf if layer == N-1 else params['layer_bottom_list'][layer + 1]
if Ez_up == 0:
# This is to avoid 0 * nan for infinitely-thick top or bottom layers
up_term = 0
else:
up_term = Ez_up * cmath.exp(1j * kz * (z - layer_bottom) + 1j * kx * x)
if Ez_down == 0:
down_term = 0
else:
down_term = Ez_down * cmath.exp(1j * kz * (layer_top - z) + 1j * kx * x)
return up_term + down_term
def Sx(z, params, x=0, layer=None):
"""
Complex Poynting vector (x-component) at (x,z). See Hy documentation. The
    real part of this equals the net energy flow, averaged over a cycle. The
    imaginary part is related to reactive (stored) energy at a given point,
    flowing back and forth reversibly within a single cycle.
"""
Ez_here = Ez(z, params, x=x, layer=layer)
Hy_here = Hy(z, params, x=x, layer=layer)
return -0.5 * Ez_here * Hy_here.conjugate()
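# The field functions above can be sampled on any grid of z values, e.g.
# (sketch, reusing the hypothetical "mode" dictionary from the sketch above):
#
#     zs = np.linspace(-200 * nu.nm, 200 * nu.nm, num=300)
#     Hy_profile = np.array([Hy(z, mode) for z in zs])
#
# plot_mode() below does essentially this for Ex and Ez.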
def check_mode(params, thorough=False, tol=1e-5):
"""
Check that mode is valid. "thorough" mode takes a bit longer, because it
also checks that the total Poynting vector is consistent with the numerical
integral of the local Poynting vector. This should always be the case
unless I made an algebra error etc. It's off by default because it often
gives false negatives, in cases where the numerical integration does not
converge well.
Return True if the mode passes all tests and is therefore a real mode,
    otherwise return a string describing the first error discovered. Some of
    these checks should never fail unless there is an outright coding error;
    those are written as assertions rather than returning False.
"""
N = len(params['d_list'])
w = params['w']
kx = params['kx']
kz_list = params['kz_list']
ex_list = params['ex_list']
ez_list = params['ez_list']
mu_list = params['mu_list']
layer_bottom_list = params['layer_bottom_list']
Sx_list = params['Sx_list']
Sx_total = params['Sx_total']
# check boundary conditions for Ex, Ez, Hy
for layer_under in range(0,N-1):
layer_over = layer_under + 1
z = layer_bottom_list[layer_over]
ez_under = ez_list[layer_under]
ez_over = ez_list[layer_over]
a = Ex(z, params, layer=layer_under)
b = Ex(z, params, layer=layer_over)
if not floats_are_equal(a,b,tol):
return 'Ex b.c. error @ layer ' + str(layer_under) + ' - ' + str((a,b))
a = ez_under * Ez(z, params, layer=layer_under)
b = ez_over * Ez(z, params, layer=layer_over)
if not floats_are_equal(a,b,tol):
return 'Ez b.c. error @ layer ' + str(layer_under) + ' - ' + str((a,b))
a = Hy(z, params, layer=layer_under)
b = Hy(z, params, layer=layer_over)
if not floats_are_equal(a,b,tol):
return 'Hy b.c. error @ layer ' + str(layer_under) + ' - ' + str((a,b))
# check a few properties of each layer
for i in range(N):
kz = kz_list[i]
ez = ez_list[i]
ex = ex_list[i]
mu = mu_list[i]
assert_floats_are_equal(kz**2,
w**2 * mu * ex / nu.c0**2 - kx**2 * ex /ez,
tol=1e-8)
assert kz.imag >= 0
if (i == 0 or i == N-1) and kz.imag == 0:
return 'kz indicates non-confined wave @ layer ' + str(i)
if thorough:
# Check Sx_list against a numerical integration. This really just tests
# whether I made a stupid mistake in calculus or algebra, it should
# always pass even for non-modes.
# Numerical integration expects order-unity integrand, or else the
# absolute-error criterion can fire before convergence. (A few orders
# of magnitude away from 1 is OK, but not 20 orders of magnitude.) So
# I'll scale up before integrating, then scale down by the same factor
# afterwards. Poor integration can flag a correct solution as incorrect,
# but not vice-versa: If it passes the test, you can trust it.
# This scale_factor seems to work pretty reliably
scale_factor = max(abs(Sx(0, params, layer=0)),
abs(Sx(0, params, layer=1)))
assert scale_factor != 0
for i in range(N):
# Calculate integration limits
if i != 0:
lower_z = layer_bottom_list[i]
else:
lower_z = -20 / abs(kz_list[i].imag)
if i != N-1:
upper_z = layer_bottom_list[i+1]
else:
upper_z = 20 / abs(kz_list[i].imag)
integrand_re = lambda z : (Sx(z, params, layer=i) / scale_factor).real
integrand_im = lambda z : (Sx(z, params, layer=i) / scale_factor).imag
Sx_integrated = (scipy.integrate.quad(integrand_re, lower_z, upper_z)[0]
+ 1j * scipy.integrate.quad(integrand_im, lower_z, upper_z)[0])
Sx_integrated *= scale_factor
assert_floats_are_equal(Sx_list[i], Sx_integrated, tol=1e-5)
assert_floats_are_equal(Sx_total, sum(Sx_list), tol=1e-8)
# All tests passed!
return True
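# In practice (see the tests and examples below), a candidate mode is kept only
# if check_mode() returns True; any other return value is a diagnostic string.
# Sketch:
#
#     result = check_mode(mode, thorough=True)
#     if result is not True:
#         print('rejecting kx =', mode['kx'], '--', result)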
def plot_mode(params, filename_x=None, filename_z=None, z_range=None):
"""
params is a dictionary that should include kx, w, kz_list, H_up_list, etc.
This function plots the mode. Pass a filename_x to save the plot of Ex,
and/or filename_z to save the plot of Ez. z_range = [z_min, z_max] sets
the range of the horizontal axis, or z_range=None to use the default.
"""
kz_list = params['kz_list']
layer_bottom_list = params['layer_bottom_list']
N = len(kz_list)
# Choose a range of z to plot:
if z_range is not None:
z_min = z_range[0]
z_max = z_range[1]
else:
if N == 2:
# For 2 layers, put the boundary in the middle, and show the wave
# decaying on both sides
z_max = min(4 / abs(kz.imag) for kz in kz_list)
z_min = -z_max
else:
# For >= 3 layers, the layers should take up central half of plot
z_max = 1.5 * layer_bottom_list[-1]
z_min = -0.5 * layer_bottom_list[-1]
# Calculate the data
zs = np.linspace(z_min, z_max, num=200)
Exs = np.array([Ex(z, params) for z in zs])
Ezs = np.array([Ez(z, params) for z in zs])
# Normalize the E-fields to max 1
max_E = max(abs(Exs).max(), abs(Ezs).max())
Exs = Exs / max_E
Ezs = Ezs / max_E
plt.figure()
plt.plot(zs / nu.nm, Exs.real, zs / nu.nm, Exs.imag)
for i in range(1,N):
plt.axvline(x=layer_bottom_list[i] / nu.nm, color='k')
plt.title('Ex profile at time 0 and 1/4 cycle later')
plt.xlabel('z position (nm)')
plt.ylabel('E-field (arbitrary units)')
if filename_x is not None:
plt.savefig(filename_x)
plt.figure()
plt.plot(zs / nu.nm, Ezs.real, zs / nu.nm, Ezs.imag)
for i in range(1,N):
plt.axvline(x=layer_bottom_list[i] / nu.nm, color='k')
plt.title('Ez profile at time 0 and 1/4 cycle later')
plt.xlabel('z position (nm)')
plt.ylabel('E-field (arbitrary units)')
if filename_z is not None:
plt.savefig(filename_z)
def rescale_fields(factor, params):
"""
params is a dictionary that should include kx, w, kz_list, H_up_list, etc.
This function multiplies the amplitude of the wave by "factor", and returns
a new, updated parameter bundle.
"""
new_params = deepcopy(params)
N = len(new_params['d_list'])
for name in ['H_up_list', 'H_down_list', 'Ex_up_list', 'Ex_down_list',
'Ez_up_list', 'Ez_down_list']:
for i in range(N):
new_params[name][i] *= factor
for i in range(N):
new_params['Sx_list'][i] *= abs(factor)**2
new_params['Sx_total'] *= abs(factor)**2
return new_params
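# Example (sketch): rescale_fields(2.0, mode) doubles every field amplitude in
# the hypothetical "mode" bundle, and therefore quadruples Sx_list / Sx_total.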
#########################################################################
############################# TESTS #####################################
#########################################################################
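# Each test below is self-contained: import this module and call, for example,
# test_2_layer() directly.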
def test_2_layer():
"""
    Test this calculation against the analytical expressions for N=2 (a single
    interface) with an isotropic, non-magnetic medium.
"""
# angular frequency in radians * THz
w = 100 * nu.THz
# Relative permittivity of metal and dielectric
em = -4.56 + 0.12j
ed = 1.23 + 0.01j
ex_list = ez_list = [ed, em]
# Relative permeabilities
mu_list = [1,1]
# Dictionary of input parameters
input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,
'ez_list': ez_list, 'mu_list': mu_list}
# Calculate the theoretical kx
theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))
if theo_kx.imag < 0:
theo_kx *= -1
print('Theoretical kx:',
'(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))
# If I use the theoretical kx value, the mode should be correct and
# all my tests should pass.
params = deepcopy(input_params)
params['kx'] = theo_kx
params = find_all_params_from_kx(params)
kzd, kzm = params['kz_list']
# check that kz_list is correct
assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))
assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))
# check that layer_bottom_list is correct
assert params['layer_bottom_list'][0] == -inf
assert params['layer_bottom_list'][1] == 0
# Check that the boundary condition matrix agrees with hand-calculation
bc_mat = bc_matrix(params)
# ...top-left is Ex0down / H0down
assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))
# ...top-right is -Ex1up / H1up
assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))
# ...bottom-left is eps0 * Ez0down / H0down
assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))
# ...bottom-right is -eps1 * Ez1up / H1up
assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))
# Check that one of the eigenvalues is almost zero (compared to the size
# of the matrix elements).
eigenvalues = np.linalg.eig(bc_mat)[0]
assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6
# Check that the mode passes all tests.
assert check_mode(params, thorough=True) is True
# Check that I can scale the fields and it still passes all tests.
params_scaled = rescale_fields(1.23+4.56j, params)
assert check_mode(params_scaled, thorough=True) is True
# Now try my kx-finding algorithm, to see if it finds the right value.
kx_list = find_kx(input_params)
print('kx_list:',
['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)
for kx in kx_list])
kx = kx_list[0]
assert_floats_are_equal(theo_kx, kx)
plot_mode(params)
print('If you see this message, all the tests succeeded!!')
def test_davis():
"""
This should reproduce T.J. Davis, 2009, "Surface plasmon modes in
multi-layer thin-films". http://dx.doi.org/10.1016/j.optcom.2008.09.043
    The first plot should resemble Fig. 1b, and the modes should match the ones
    listed in Table 1. Further plots are generated to reproduce Fig. 2a.
"""
w = 2 * pi * nu.c0 / (780 * nu.nm)
eps_gold = -21.19 + 0.7361j
eps_glass = 2.310
eps_MgF2 = 1.891
d_list = [inf, 75 * nu.nm, 10 * nu.nm, 55 * nu.nm, 10 * nu.nm, 75 * nu.nm, inf]
ex_list = [eps_glass, eps_gold, eps_MgF2, eps_gold, eps_MgF2, eps_gold, eps_glass]
ez_list = ex_list
mu_list = [1,1,1,1,1,1,1]
params = {'w': w,
'd_list': d_list,
'ex_list': ex_list,
'ez_list': ez_list,
'mu_list': mu_list}
kx_list = find_kx(params, show_progress=False,
search_domain=[-0.05/nu.nm, 0.05/nu.nm, 0, 0.4/nu.nm],
grid_points=20, iterations=10, reduction_factor=9,
plot_full_region=True)
print('kx_list -- ' + str(len(kx_list)) + ' entries...')
print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)
for kx in kx_list])
    # The modes discovered by Davis (Table 1 of the paper)
davis_modes = [x * nu.nm**-1 for x in [1.2969e-2 + 2.7301e-5j,
1.2971e-2 + 2.7644e-5j,
3.0454e-2 + 3.7872e-4j,
3.2794e-2 + 4.6749e-4j,
-2.1254e-4 + 5.4538e-2j,
1.2634e-3 + 5.4604e-2j]]
for i in range(len(davis_modes)):
davis_kx = davis_modes[i]
print('Looking for "Mode ' + str(i+1) + '" in Davis paper -- kx =',
'(%.5g+%.5gj) rad/um' % (davis_kx.real / nu.um**-1, davis_kx.imag / nu.um**-1))
which_kx = np.argmin(abs(np.array(kx_list) - davis_kx))
my_kx = kx_list[which_kx]
print('Relative error: ',
abs(my_kx - davis_kx) / (abs(my_kx) + abs(davis_kx)))
print('---')
print('Are the last two modes missing? They were for me. Re-trying with a')
print('smaller search domain (zoomed towards kx=0). (By the way, ')
print('using a larger number for grid_points would also work here.)')
print('---')
kx_list2 = find_kx(params, show_progress=False,
search_domain=[-0.05/nu.nm, 0.05/nu.nm, 0, 0.1/nu.nm],
grid_points=20, iterations=10, reduction_factor=9,
plot_full_region=True)
print('kx_list2 -- ' + str(len(kx_list2)) + ' entries...')
print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)
for kx in kx_list2])
for i in range(len(davis_modes)):
davis_kx = davis_modes[i]
print('Looking for "Mode ' + str(i+1) + '" in Davis paper -- kx =',
'(%.5g+%.5gj) rad/um' % (davis_kx.real / nu.um**-1, davis_kx.imag / nu.um**-1))
which_kx = np.argmin(abs(np.array(kx_list2) - davis_kx))
my_kx = kx_list2[which_kx]
print('Relative error: ',
abs(my_kx - davis_kx) / (abs(my_kx) + abs(davis_kx)))
new_params = deepcopy(params)
new_params['kx'] = my_kx
new_params = find_all_params_from_kx(new_params)
plt.figure()
plt.title('"Mode ' + str(i+1) + '" in Davis paper -- Plot of Re(Hy) and Im(Hy)')
zs = np.linspace(-300 * nu.nm, 500 * nu.nm, num=400)
Hs = np.array([Hy(z, new_params) for z in zs])
plt.plot(zs / nu.nm, Hs.real / max(abs(Hs)),
zs / nu.nm, Hs.imag / max(abs(Hs)))
plt.xlabel('z (nm)')
plt.ylabel('Hy (arbitrary units)')
def test_SIS():
"""
Test spoof-plasmon / insulator / spoof-plasmon structure calculation
against analytical formula in Kats et al, "Spoof plasmon analogue of
metal-insulator-metal waveguides", http://dx.doi.org/10.1364/OE.19.014860
    This exercises the code paths and formulas for mu != 1 and epsilon_z != epsilon_x.
"""
def is_symmetric_mode(beta, k0, g, a_over_d, h):
"""
Eq (7i) of paper
beta is what I call kx
k0 is vacuum angular wavenumber
g is thickness of air layer
h is thickness of corrugated layer
a_over_d is the fraction of corrugated layer which is air
"""
lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)
* cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))
rhs = a_over_d * cmath.tan(k0 * h)
return floats_are_equal(lhs, rhs, tol=1e-4)
def is_antisymmetric_mode(beta, k0, g, a_over_d, h):
"""
Eq (7ii) of paper
"""
lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)
/ cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))
rhs = a_over_d * cmath.tan(k0 * h)
return floats_are_equal(lhs, rhs, tol=1e-4)
# Choose some parameters (can be anything, these are from Fig. 3 caption)
w = 2 * pi * (4 * nu.THz)
h = 50 * nu.um
g = 50 * nu.um
a_over_d = 0.1
# Now run analysis
k0 = w / nu.c0
d_over_a = a_over_d**-1
# epsilon of a PEC (perfect electric conductor) is -infinity, but code
# doesn't allow that. Use big value instead...
PEC_eps = -1e11
params = {'d_list': [inf, h, g, h, inf],
'ex_list': [PEC_eps, d_over_a, 1, d_over_a, PEC_eps],
'ez_list': [PEC_eps, PEC_eps, 1, PEC_eps, PEC_eps],
'mu_list': [1, a_over_d, 1, a_over_d, 1],
'w': w}
kx_list = find_kx(params, grid_points=30, iterations=11, reduction_factor=14,
plot_full_region=True,
search_domain=[-1e5 * nu.m**-1, 1e5 * nu.m**-1, 0, 1e5 * nu.m**-1])
print('kx_list -- ' + str(len(kx_list)) + ' entries...')
print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)
for kx in kx_list])
# Here, I'm only interested in solutions on the positive real axis
kx_list = [kx for kx in kx_list if abs(kx.real) > 1e5 * abs(kx.imag)]
kx_list = [-kx if kx.real < 0 else kx for kx in kx_list]
# Delete repeats with tolerance 1e-4
kx_list_norepeat = []
for kx in kx_list:
if not any(floats_are_equal(kx, kx2, tol=1e-4) for kx2 in kx_list_norepeat):
kx_list_norepeat.append(kx)
kx_list = kx_list_norepeat
print('kx_list (cleaned up) -- ' + str(len(kx_list)) + ' entries...')
print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)
for kx in kx_list])
found_sym_mode = False
for kx in kx_list:
if is_symmetric_mode(kx, k0, g, a_over_d, h):
found_sym_mode = True
print('Found symmetric mode! ',
'(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))
params2 = deepcopy(params)
params2['kx'] = kx
params2 = find_all_params_from_kx(params2)
if check_mode(params2) is not True:
print('Not a real mode? ... Error code:')
print(check_mode(params2))
else:
plot_mode(params2)
assert found_sym_mode
found_anti_mode = False
for kx in kx_list:
if is_antisymmetric_mode(kx, k0, g, a_over_d, h):
found_anti_mode = True
print('Found antisymmetric mode! ',
'(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))
params2 = deepcopy(params)
params2['kx'] = kx
params2 = find_all_params_from_kx(params2)
if check_mode(params2) is not True:
print('Not a real mode? ... Error code:')
print(check_mode(params2))
else:
plot_mode(params2)
assert found_anti_mode
print('Congratulations, the solver found the correct kx for both the')
print('symmetric and antisymmetric mode of the structure, consistent')
print('with the analytical formula in the literature.')
#########################################################################
############################# EXAMPLES ##################################
#########################################################################
def example1():
"""
A 300 THz wave is in an air-metal-insulator structure, where both the
metal and insulator are anisotropic and magnetic. For the metal,
epsilon_x = -5+2j, epsilon_z = -3+3j, mu=1.2. For the insulator,
epsilon_x = 10, epsilon_z = 7, mu=1.3. The metal is 40nm thick.
Goal: Find the few lowest-order modes; for each, display the kz values and
the relation between Ex(0,0) and the total power flow in the mode.
"""
params = {'w': 2 * pi * 300 * nu.THz,
'd_list': [inf, 40 * nu.nm, inf],
'ex_list': [1, -5 + 2j, 10],
'ez_list': [1, -3 + 3j, 7],
'mu_list': [1, 1.2, 1.3]}
kx_list = find_kx(params, show_progress=False, grid_points=30,
iterations=8, reduction_factor=14,
plot_full_region=True)
print('kx_list: ',
['(%.4g+%.4gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)
for kx in kx_list])
for kx in kx_list:
new_params = deepcopy(params)
new_params['kx'] = kx
print('---')
print('With kx =', '(%.4g+%.4gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1),
', checking mode...')
new_params = find_all_params_from_kx(new_params)
print('kz in each layer:',
['(%.4g+%.4gj) rad/um' % (kz.real / nu.um**-1, kz.imag / nu.um**-1)
for kz in new_params['kz_list']])
check_mode_results = check_mode(new_params)
if check_mode_results is not True:
print('This seems not to be a real mode. Error code:')
print(check_mode_results)
else:
print('The mode passes all tests! Plotting...')
plot_mode(new_params)
scale_factor = (5 * nu.nW/nu.um) / new_params['Sx_total']
scaled_params = rescale_fields(scale_factor, new_params)
print('If this wave carries 5 nW/um power (i.e. 5 nW travels in +x-direction')
print('through the surface x=0, 0<y<1um, -inf<z<inf)')
print('then |Ex(0,0)|=',
abs(Ex(0, scaled_params)) / (nu.V/nu.m), 'V/m')
def example2():
"""
A 300 THz wave is at an air-metal interface, where the metal is anisotropic
and magnetic. Goal: Same as example1()
"""
params = {'w': 2 * pi * 300 * nu.THz,
'd_list': [inf, inf],
'ex_list': [1, -5],
'ez_list': [1, 1],
'mu_list': [1.1, 1.3]}
kx_list = find_kx(params, show_progress=False, grid_points=30,
iterations=8, reduction_factor=14,
plot_full_region=True)
print('kx_list: ',
['(%.4g+%.4gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)
for kx in kx_list])
for kx in kx_list:
new_params = deepcopy(params)
new_params['kx'] = kx
print('---')
print('With kx =', '(%.4g+%.4gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1),
', checking mode...')
new_params = find_all_params_from_kx(new_params)
print('kz in each layer:',
['(%.4g+%.4gj) rad/um' % (kz.real / nu.um**-1, kz.imag / nu.um**-1)
for kz in new_params['kz_list']])
check_mode_results = check_mode(new_params)
if check_mode_results is not True:
print('This seems not to be a real mode. Error code:')
print(check_mode_results)
else:
print('The mode passes all tests! Plotting...')
plot_mode(new_params)
scale_factor = (5 * nu.nW/nu.um) / new_params['Sx_total']
scaled_params = rescale_fields(scale_factor, new_params)
print('If this wave carries 5 nW/um power (i.e. 5 nW travels in +x-direction')
print('through the surface x=0, 0<y<1um, -inf<z<inf)')
print('then |Ex(0,0)|=',
abs(Ex(0, scaled_params)) / (nu.V/nu.m), 'V/m')
| mit |
katsikas/gnuradio | gnuradio-core/src/examples/pfb/synth_filter.py | 17 | 2270 | #!/usr/bin/env python
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = gr.sig_source_c(fs, gr.GR_SIN_WAVE, fi, 1)
sigs.append(s)
taps = gr.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps)/nchans)
filtbank = gr.pfb_synthesizer_ccf(nchans, taps)
head = gr.head(gr.sizeof_gr_complex, N)
snk = gr.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
for i,si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = scipy.blackman
s2.psd(snk.data()[10000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
lekshmideepu/nest-simulator | pynest/examples/sensitivity_to_perturbation.py | 8 | 8858 | # -*- coding: utf-8 -*-
#
# sensitivity_to_perturbation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Sensitivity to perturbation
---------------------------
This script simulates a network in two successive trials, which are identical
except for one extra input spike in the second realization (a small
perturbation). The network consists of recurrent, randomly connected excitatory
and inhibitory neurons. Its activity is driven by an external Poisson input
provided to all neurons independently. To ensure that the network is reset
properly between the two trials, we take the following steps:
- resetting the network
- resetting the random number generator
- resetting the internal clock
- deleting all entries in the spike recorder
- introducing a hyperpolarization phase between the trials
  (to prevent spikes that are still in NEST's buffers at the end of the
  first simulation from being fed into the second simulation)
"""
###############################################################################
# Importing all necessary modules for simulation, analysis and plotting.
import numpy
import matplotlib.pyplot as plt
import nest
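# Note: this example assumes the NEST 3.x interface (``spike_recorder`` model,
# ``rng_seed`` and ``biological_time`` kernel attributes); it will not run
# unchanged on NEST 2.x.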
###############################################################################
# Here we define all parameters necessary for building and simulating the
# network.
# We start with the global network parameters.
NE = 1000 # number of excitatory neurons
NI = 250 # number of inhibitory neurons
N = NE + NI # total number of neurons
KE = 100 # excitatory in-degree
KI = 25 # inhibitory in-degree
###############################################################################
# Parameters specific for the neurons in the network. The default values of
# the reset potential ``E_L`` and the spiking threshold ``V_th`` are used to set
# the limits of the initial potential of the neurons.
neuron_model = 'iaf_psc_delta'
neuron_params = nest.GetDefaults(neuron_model)
Vmin = neuron_params['E_L'] # minimum of initial potential distribution (mV)
Vmax = neuron_params['V_th'] # maximum of initial potential distribution (mV)
###############################################################################
# Synapse parameters. Changing the weights `J` in the network can lead to
# qualitatively different behaviors. If `J` is small (e.g. ``J = 0.1``), we
# are likely to observe a non-chaotic network behavior (after perturbation
# the network returns to its original activity). Increasing `J`
# (e.g ``J = 5.5``) leads to rather chaotic activity. Given that in this
# example the transition to chaos is probabilistic, we sometimes observe
# chaotic behavior for small weights (e.g. ``J = 0.5``) and non-chaotic
# behavior for strong weights (e.g. ``J = 5.4``).
J = 0.5 # excitatory synaptic weight (mV)
g = 6. # relative inhibitory weight
delay = 0.1 # spike transmission delay (ms)
# External input parameters.
Jext = 0.2 # PSP amplitude for external Poisson input (mV)
rate_ext = 6500. # rate of the external Poisson input
# Perturbation parameters.
t_stim = 400. # perturbation time (time of the extra spike)
Jstim = Jext # perturbation amplitude (mV)
# Simulation parameters.
T = 1000. # simulation time per trial (ms)
fade_out = 2. * delay # fade out time (ms)
dt = 0.01 # simulation time resolution (ms)
seed_NEST = 30 # seed of random number generator in Nest
seed_numpy = 30 # seed of random number generator in NumPy
senders = []
spiketimes = []
###############################################################################
# we run the two simulations successively. After each simulation the
# sender ids and spiketimes are stored in a list (``senders``, ``spiketimes``).
for trial in [0, 1]:
    # Before building the network, we reset the simulation kernel to ensure
    # that previous NEST simulations in this Python session do not disturb this
    # one, and we set the simulation resolution (synaptic delays defined later
    # cannot be smaller than this resolution).
nest.ResetKernel()
nest.SetKernelStatus({"resolution": dt})
###############################################################################
# Now we start building the network and create excitatory and inhibitory nodes
# and connect them. According to the connectivity specification, each neuron
# is assigned random KE synapses from the excitatory population and random KI
# synapses from the inhibitory population.
nodes_ex = nest.Create(neuron_model, NE)
nodes_in = nest.Create(neuron_model, NI)
allnodes = nodes_ex + nodes_in
nest.Connect(nodes_ex, allnodes,
conn_spec={'rule': 'fixed_indegree', 'indegree': KE},
syn_spec={'weight': J, 'delay': dt})
nest.Connect(nodes_in, allnodes,
conn_spec={'rule': 'fixed_indegree', 'indegree': KI},
syn_spec={'weight': -g * J, 'delay': dt})
###############################################################################
# Afterwards we create a ``poisson_generator`` that provides spikes (the external
# input) to the neurons until time ``T`` is reached.
# Afterwards a ``dc_generator``, which is also connected to the whole population,
# provides a strong hyperpolarization step for a short time period ``fade_out``.
#
# The ``fade_out`` period has to last at least twice as long as the simulation
# resolution to suppress the neurons from firing.
ext = nest.Create("poisson_generator",
params={'rate': rate_ext, 'stop': T})
nest.Connect(ext, allnodes,
syn_spec={'weight': Jext, 'delay': dt})
suppr = nest.Create("dc_generator",
params={'amplitude': -1e16, 'start': T,
'stop': T + fade_out})
nest.Connect(suppr, allnodes)
spikerecorder = nest.Create("spike_recorder")
nest.Connect(allnodes, spikerecorder)
###############################################################################
# We then create the ``spike_generator``, which provides the extra spike
# (perturbation).
stimulus = nest.Create("spike_generator")
stimulus.spike_times = []
###############################################################################
# We need to reset the random number generator and the clock of
# the simulation Kernel. In addition, we ensure that there is no spike left in
# the spike recorder.
nest.SetKernelStatus({"rng_seed": seed_NEST, 'biological_time': 0.0})
spikerecorder.n_events = 0
# We assign random initial membrane potentials to all neurons
numpy.random.seed(seed_numpy)
Vms = Vmin + (Vmax - Vmin) * numpy.random.rand(N)
allnodes.V_m = Vms
##############################################################################
# In the second trial, we add an extra input spike at time ``t_stim`` to the
# neuron that fires first after perturbation time ``t_stim``. Thus, we make sure
# that the perturbation is transmitted to the network before it fades away in
# the perturbed neuron. (Single IAF-neurons are not chaotic.)
if trial == 1:
id_stim = [senders[0][spiketimes[0] > t_stim][0]]
nest.Connect(stimulus, nest.NodeCollection(id_stim),
syn_spec={'weight': Jstim, 'delay': dt})
stimulus.spike_times = [t_stim]
# Now we simulate the network and add a fade out period to discard
# remaining spikes.
nest.Simulate(T)
nest.Simulate(fade_out)
# Storing the data.
senders += [spikerecorder.get('events', 'senders')]
spiketimes += [spikerecorder.get('events', 'times')]
###############################################################################
# We plot the spiking activity of the network (first trial in red, second trial
# in black).
plt.figure(1)
plt.clf()
plt.plot(spiketimes[0], senders[0], 'ro', ms=4.)
plt.plot(spiketimes[1], senders[1], 'ko', ms=2.)
plt.xlabel('time (ms)')
plt.ylabel('neuron id')
plt.xlim((0, T))
plt.ylim((0, N))
plt.show()
| gpl-2.0 |
Aghosh993/QuadcopterCodebase | GroundSoftware/telemetry_tools/LearningMaterial/scope_sample.py | 1 | 4440 | #!/usr/bin/python2
"""
Emulate an oscilloscope. Requires the animation API introduced in
matplotlib 1.0 SVN.
This example is based partly on http://matplotlib.org/examples/animation/strip_chart_demo.html (with modifications to adapt to streaming data)
"""
# (c) 2016, Abhimanyu Ghosh
import numpy as np
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
# A class to plot a stream of data on an animated Matplotlib window.
# "maxt" essentially sizes the width of the scope plot
# "dt" essentially affects the resolution of the plot in terms of samples/window
# "zoom_step" affects how fast the scroll wheel zooms in/out in time
class Scope(object):
def __init__(self, ax, maxt=10, dt=0.02, zoom_step=1.5):
self.ax = ax
self.dt = dt
self.maxt = maxt
self.tdata = [0]
self.ydata = [0]
self.line = Line2D(self.tdata, self.ydata)
self.ax.add_line(self.line)
self.ax.set_ylim(-1000, 10000)
self.ax.set_xlim(0, self.maxt)
self.zoom_step = zoom_step
# A function to auto-resize the Y-axis in response to the true range of data from the previous "maxt" worth of data points
def resize_y(self, vert_buffer=0.2):
# First get the Y-axis limits at present:
vert_limits = self.ax.get_ylim()
ymin = vert_limits[0]
ymax = vert_limits[1]
# Counters to track data max and min:
ydata_max = 0.0
ydata_min = 0.0
# Look through the data to find true max and min:
for i in range(len(self.ydata)):
if self.ydata[i] > ydata_max:
ydata_max = self.ydata[i]
if self.ydata[i] < ydata_min:
ydata_min = self.ydata[i]
# Update desired Y-axis limits, and add in a buffer space so points at the edge are more visible:
ymax = ydata_max + vert_buffer
ymin = ydata_min - vert_buffer
# Propagate these changes to the plot:
self.ax.set_ylim(ymin, ymax)
def update(self, y):
# Get the last "x-axis" point in the array for that axis:
lastt = self.tdata[-1]
# If we're at the end of a horizontal period (i.e. "maxt" number of points collected)
# we reset the array elements, recompute Y-axis limits and shift the X-axis forward another "maxt" amount:
if lastt > self.tdata[0] + self.maxt:
self.tdata = [self.tdata[-1]]
self.resize_y()
self.ydata = [self.ydata[-1]]
self.ax.set_xlim(self.tdata[0], self.tdata[0] + self.maxt)
self.ax.figure.canvas.draw()
# We keep putting new data into the Y-axis buffer and adding to the X-axis:
t = self.tdata[-1] + self.dt
self.tdata.append(t)
self.ydata.append(y)
self.line.set_data(self.tdata, self.ydata)
return self.line,
# A callback for Matplotlib to go to when the user scrolls in/out while having cursor over the plot area:
def time_zoom_handler(self, event):
# A simple exponential zoom. event.step determines if the user is scrolling in/out
# and thus the direction of our zooming action:
zoom_multiplier = 1.0
zoom_multiplier *= math.exp(event.step*self.zoom_step)
self.maxt *= zoom_multiplier
# If the user is zooming out, we want to trigger a re-draw so we're not waiting forever
# for a whole new set of data... just "stretch" the existing data to fit the new range
# and resize the X-axis appropriately:
if self.tdata[-1] < self.tdata[0] + self.maxt:
self.line.set_data(self.tdata, self.ydata)
self.ax.set_xlim(self.tdata[0], self.tdata[-1])
self.ax.figure.canvas.draw()
self.tdata = [self.tdata[-1]]
self.ydata = [self.ydata[-1]]
self.ax.set_xlim(self.tdata[0], self.tdata[0] + self.maxt)
# A stand-in function to generate the data to be plotted:
def emitter(p=0.03):
	# Emit one sine-wave sample per call (the "p" argument is not used here;
	# the original strip-chart demo used it as a random-pulse probability).
t = 0.0
while True:
t += 0.01
yield math.sin(t*10.0)
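# Any generator that yields one float per call can serve as the data source.
# For example (a sketch, not part of the original demo), a noisy sine wave:
#
#     def noisy_emitter(noise_amp=0.1):
#         t = 0.0
#         while True:
#             t += 0.01
#             yield math.sin(t*10.0) + noise_amp * (np.random.rand() - 0.5)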
fig, ax = plt.subplots()
scope = Scope(ax, zoom_step=0.2)
f2 = ax.get_figure()
# pass a generator in "emitter" to produce data for the update func
ani = animation.FuncAnimation(fig, scope.update, emitter, interval=2, blit=True)
f2.canvas.mpl_connect('scroll_event', scope.time_zoom_handler)
plt.show() | gpl-3.0 |
deepakantony/sms-tools | software/models_interface/dftModel_function.py | 21 | 2413 | # function to call the main analysis/synthesis functions in software/models/dftModel.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import dftModel as DFT
def main(inputFile = '../../sounds/piano.wav', window = 'blackman', M = 511, N = 1024, time = .2):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (choice of rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size (odd integer value)
N: fft size (power of two, bigger or equal than than M)
time: time to start analysis (in seconds)
"""
# read input sound (monophonic with sampling rate of 44100)
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# get a fragment of the input sound of size M
sample = int(time*fs)
if (sample+M >= x.size or sample < 0): # raise error if time outside of sound
raise ValueError("Time outside sound boundaries")
x1 = x[sample:sample+M]
# compute the dft of the sound fragment
mX, pX = DFT.dftAnal(x1, w, N)
# compute the inverse dft of the spectrum
y = DFT.dftSynth(mX, pX, w.size)*sum(w)
# create figure
plt.figure(figsize=(12, 9))
# plot the sound fragment
plt.subplot(4,1,1)
plt.plot(time + np.arange(M)/float(fs), x1)
plt.axis([time, time + M/float(fs), min(x1), max(x1)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the magnitude spectrum
plt.subplot(4,1,2)
plt.plot(float(fs)*np.arange(mX.size)/float(N), mX, 'r')
plt.axis([0, fs/2.0, min(mX), max(mX)])
plt.title ('magnitude spectrum: mX')
plt.ylabel('amplitude (dB)')
plt.xlabel('frequency (Hz)')
# plot the phase spectrum
plt.subplot(4,1,3)
plt.plot(float(fs)*np.arange(pX.size)/float(N), pX, 'c')
plt.axis([0, fs/2.0, min(pX), max(pX)])
plt.title ('phase spectrum: pX')
plt.ylabel('phase (radians)')
plt.xlabel('frequency (Hz)')
# plot the sound resulting from the inverse dft
plt.subplot(4,1,4)
plt.plot(time + np.arange(M)/float(fs), y)
plt.axis([time, time + M/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
| agpl-3.0 |
gamahead/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid line.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| gpl-3.0 |
hitszxp/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
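# Example invocation, selecting two of the estimators defined above:
#   python bench_20newsgroups.py -e logistic_regression naive_bayes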
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
emhuff/regularizedInversion | makeLikelihoodPlots.py | 1 | 23132 | #!/usr/bin/env python
import matplotlib as mpl
mpl.use('Agg')
import desdb
import numpy as np
import esutil
import pyfits
import sys
import argparse
import healpy as hp
import os
import functions2
import slr_zeropoint_shiftmap as slr
import numpy.lib.recfunctions as rf
import matplotlib.pyplot as plt
def NoSimFields(band='i'):
q = """
SELECT
balrog_index,
mag_auto,
flags
FROM
SUCHYTA1.balrog_sva1v2_nosim_%s
""" %(band)
return q
def SimFields(band='i',table='sva1v2'):
q = """
SELECT
t.tilename as tilename,
m.xwin_image as xwin_image,
m.ywin_image as ywin_image,
m.xmin_image as xmin_image,
m.ymin_image as ymin_image,
m.xmax_image as xmax_image,
m.ymax_image as ymax_image,
m.balrog_index as balrog_index,
m.alphawin_j2000 as ra,
m.deltawin_j2000 as dec,
m.mag_auto as mag_auto,
m.spread_model as spread_model,
m.spreaderr_model as spreaderr_model,
m.class_star as class_star,
m.mag_psf as mag_psf,
t.mag as truth_mag_auto,
m.flags as flags
FROM
SUCHYTA1.balrog_%s_sim_%s m
JOIN SUCHYTA1.balrog_%s_truth_%s t ON t.balrog_index = m.balrog_index
""" %(table, band, table, band)
return q
def DESFields(tilestuff, band='i'):
q = """
SELECT
tilename,
coadd_objects_id,
mag_auto_%s as mag_auto,
alphawin_j2000_%s as ra,
deltawin_j2000_%s as dec,
spread_model_%s as spread_model,
spreaderr_model_%s as spreaderr_model,
class_star_%s as class_star,
mag_psf_%s as mag_psf,
flags_%s as flags
FROM
sva1_coadd_objects
WHERE
tilename in %s
""" % (band,band,band,band,band,band,band,band,str(tuple(np.unique(tilestuff['tilename']))))
return q
def TruthFields(band='i', table = 'sva1v2'):
q = """
SELECT
balrog_index,
tilename,
ra,
dec,
objtype,
mag
FROM
SUCHYTA1.balrog_%s_truth_%s
"""%(table,band)
return q
def GetDESCat( depthmap, nside, tilestuff, tileinfo, band='i',depth = 0.0):
cur = desdb.connect()
q = DESFields(tileinfo, band=band)
detcat = cur.quick(q, array=True)
detcat = functions2.ValidDepth(depthmap, nside, detcat, rakey='ra', deckey='dec',depth = depth)
detcat = functions2.RemoveTileOverlap(tilestuff, detcat, col='tilename', rakey='ra', deckey='dec')
return detcat
def getTileInfo(catalog, HealConfig=None):
if HealConfig is None:
HealConfig = getHealConfig()
tiles = np.unique(catalog['tilename'])
cur = desdb.connect()
q = "SELECT tilename, udecll, udecur, urall, uraur FROM coaddtile"
tileinfo = cur.quick(q, array=True)
tilestuff = {}
for i in range(len(tileinfo)):
tilestuff[ tileinfo[i]['tilename'] ] = tileinfo[i]
    # Number of fine (map_nside) pixels per coarse (out_nside) pixel; note that
    # this value is not used below.
    pixel_area_ratio = np.power(HealConfig['map_nside'] / float(HealConfig['out_nside']), 2.0)
depthmap, nside = functions2.GetDepthMap(HealConfig['depthfile'])
return depthmap, nside
def cleanCatalog(catalog, tag='mag_auto'):
# We should get rid of obviously wrong things.
keep = np.where( (catalog[tag] > 15. ) & (catalog[tag] < 30.) & (catalog['flags'] < 2) )
return catalog[keep]
def removeBadTilesFromTruthCatalog(truth, tag='mag_auto', goodfrac = 0.8):
tileList = np.unique(truth['tilename'])
number = np.zeros(tileList.size)
for tile, i in zip(tileList,xrange(number.size)):
number[i] = np.sum(truth['tilename'] == tile)
tileList = tileList[number > goodfrac*np.max(number)]
keep = np.in1d( truth['tilename'], tileList )
return truth[keep]
def mergeCatalogsUsingPandas(sim=None, truth=None, key='balrog_index', suffixes = ['_sim','']):
import pandas as pd
simData = pd.DataFrame(sim)
truthData = pd.DataFrame(truth)
matched = pd.merge(simData, truthData, on=key, suffixes = suffixes)
matched_arr = matched.to_records(index=False)
# This last step is necessary because Pandas converts strings to Objects when eating structured arrays.
# And np.recfunctions flips out when it has one.
oldDtype = matched_arr.dtype.descr
newDtype = oldDtype
for thisOldType,i in zip(oldDtype, xrange(len(oldDtype) )):
if 'O' in thisOldType[1]:
newDtype[i] = (thisOldType[0], 'S12')
matched_arr = np.array(matched_arr,dtype=newDtype)
return matched_arr
def GetFromDB( band='i', depth = 0.0,tables =['sva1v2','sva1v3_2']): # tables =['sva1v2','sva1v3','sva1v3_2']
depthfile = '../sva1_gold_1.0.2-4_nside4096_nest_i_auto_weights.fits'
cur = desdb.connect()
q = "SELECT tilename, udecll, udecur, urall, uraur FROM coaddtile"
tileinfo = cur.quick(q, array=True)
tilestuff = {}
for i in range(len(tileinfo)):
tilestuff[ tileinfo[i]['tilename'] ] = tileinfo[i]
depthmap, nside = functions2.GetDepthMap(depthfile)
truths = []
sims = []
truthMatcheds = []
for tableName in tables:
q = TruthFields(band=band,table=tableName)
truth = cur.quick(q, array=True)
truth = removeBadTilesFromTruthCatalog(truth)
truth = functions2.ValidDepth(depthmap, nside, truth, depth = depth)
truth = functions2.RemoveTileOverlap(tilestuff, truth)
unique_binds, unique_inds = np.unique(truth['balrog_index'],return_index=True)
truth = truth[unique_inds]
q = SimFields(band=band, table=tableName)
sim = cur.quick(q, array=True)
sim = cleanCatalog(sim,tag='mag_auto')
unique_binds, unique_inds = np.unique(sim['balrog_index'],return_index=True)
sim = sim[unique_inds]
truthMatched = mergeCatalogsUsingPandas(sim=sim,truth=truth)
sim = sim[np.in1d(sim['balrog_index'],truthMatched['balrog_index'])]
sim.sort(order='balrog_index')
truthMatched.sort(order='balrog_index')
truthMatcheds.append(truthMatched)
truths.append(truth)
sims.append(sim)
sim = np.hstack(sims)
truth = np.hstack(truths)
truthMatched = np.hstack(truthMatcheds)
des = GetDESCat(depthmap, nside, tilestuff, sim, band=band,depth = depth)
des = cleanCatalog(des, tag='mag_auto')
return des, sim, truthMatched, truth, tileinfo
def getCatalogs(reload=False,band='i'):
# Check to see whether the catalog files exist. If they do, then
# use the files. If at least one does not, then get what we need
# from the database
fileNames = ['desCatalogFile-'+band+'.fits','BalrogObsFile-'+band+'.fits',
'BalrogTruthFile-'+band+'.fits', 'BalrogTruthMatchedFile-'+band+'.fits',
'BalrogTileInfo.fits']
exists = True
for thisFile in fileNames:
print "Checking for existence of: "+thisFile
if not os.path.isfile(thisFile): exists = False
if exists and not reload:
desCat = esutil.io.read(fileNames[0])
BalrogObs = esutil.io.read(fileNames[1])
BalrogTruth = esutil.io.read(fileNames[2])
BalrogTruthMatched = esutil.io.read(fileNames[3])
BalrogTileInfo = esutil.io.read(fileNames[4])
else:
print "Cannot find files, or have been asked to reload. Getting data from DESDB."
desCat, BalrogObs, BalrogTruthMatched, BalrogTruth, BalrogTileInfo = GetFromDB(band=band)
esutil.io.write( fileNames[0], desCat , clobber=True)
esutil.io.write( fileNames[1], BalrogObs , clobber=True)
esutil.io.write( fileNames[2], BalrogTruth , clobber=True)
esutil.io.write( fileNames[3], BalrogTruthMatched , clobber=True)
esutil.io.write( fileNames[4], BalrogTileInfo, clobber=True)
return desCat, BalrogObs, BalrogTruthMatched, BalrogTruth, BalrogTileInfo
def hpHEALPixelToRaDec(pixel, nside=4096, nest=True):
theta, phi = hp.pix2ang(nside, pixel, nest=nest)
ra, dec = convertThetaPhiToRaDec(theta, phi)
return ra, dec
def hpRaDecToHEALPixel(ra, dec, nside= 4096, nest= True):
phi = ra * np.pi / 180.0
theta = (90.0 - dec) * np.pi / 180.0
hpInd = hp.ang2pix(nside, theta, phi, nest= nest)
return hpInd
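# HEALPix angle convention used above: theta is the colatitude (pi/2 - dec) and
# phi the longitude (ra), both in radians, with NESTED pixel ordering.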
def convertThetaPhiToRaDec(theta, phi):
ra = phi*180.0/np.pi
dec = 90.0 - theta*180.0/np.pi
return ra,dec
def convertRaDecToThetaPhi(ra, dec):
theta = (90.0 - dec) * np.pi / 180.0
phi = ra * np.pi / 180.0
return theta, phi
def HealPixifyCatalogs(catalog=None, healConfig=None, ratag='ra', dectag = 'dec'):
HealInds = hpRaDecToHEALPixel( catalog[ratag],catalog[dectag], nside= healConfig['map_nside'], nest= healConfig['nest'])
healCat = rf.append_fields(catalog,'HEALIndex',HealInds,dtypes=HealInds.dtype)
return healCat
def getHealConfig(map_nside = 4096, out_nside = 128, depthfile = '../sva1_gold_1.0.2-4_nside4096_nest_i_auto_weights.fits'):
HealConfig = {}
HealConfig['map_nside'] = map_nside
HealConfig['out_nside'] = out_nside
HealConfig['finer_nside'] = map_nside
HealConfig['depthfile'] = depthfile
HealConfig['nest'] = True
return HealConfig
def chooseBins(catalog = None, tag=None, binsize = None, upperLimit = None, lowerLimit = None):
if binsize is None:
binsize = 2*( np.percentile(catalog[tag], 75) - np.percentile( catalog[tag], 25 ) ) / (catalog.size ) **(1./3.)
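        # This default bin width is the Freedman-Diaconis rule: 2*IQR / n^(1/3).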
if upperLimit is None:
upperLimit = np.max(catalog[tag])
if lowerLimit is None:
lowerLimit = np.min(catalog[tag])
nbins = int( np.ceil( (upperLimit - lowerLimit) / binsize) )
nEdge = nbins+1
bins = lowerLimit + binsize * np.arange(nEdge)
bins[0] = bins[0] - 0.001*binsize
bins[-1] = bins[-1] + 0.001*binsize
return bins
def makeLikelihoodMatrix( sim=None, truth=None, truthMatched = None, Lcut = 0.,
obs_bins = None, truth_bins = None, simTag = None, truthTag = None):
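    # Builds an estimate of P(observed bin | true bin): each matched object adds
    # 1/N_truth to its (observed, true) cell, so each column is normalized by the
    # number of injected (truth) objects in that true-magnitude bin.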
obs_bin_index = np.digitize(sim[simTag], obs_bins) - 1
truth_bin_index = np.digitize(truthMatched[truthTag], truth_bins) - 1
# Limit loop to objects in the given bin ranges.
nbins_truth = truth_bins.size -1
nbins_obs = obs_bins.size - 1
good = ((truth_bin_index > 0) & (truth_bin_index < nbins_truth) &
(obs_bin_index > 0) & (obs_bin_index < nbins_obs) )
obs_bin_index = obs_bin_index[good]
truth_bin_index = truth_bin_index[good]
N_truth, _ = np.histogram( truth[truthTag], bins=truth_bins )
L = np.zeros( (nbins_obs, nbins_truth) )
II = np.zeros( (nbins_obs, nbins_truth) )
for i in xrange(obs_bin_index.size):
if N_truth[truth_bin_index[i]] > 1:
L[obs_bin_index[i], truth_bin_index[i]] = ( L[obs_bin_index[i], truth_bin_index[i]] +
1./N_truth[truth_bin_index[i]] )
II[obs_bin_index[i], truth_bin_index[i]] = II[obs_bin_index[i], truth_bin_index[i]]+1
L[II < 1] = 0.
L[L < Lcut] = 0.
return L
def getAllLikelihoods( truth=None, sim=None, truthMatched = None, healConfig=None , doplot = False, getBins = False,
ratag= 'ra', dectag = 'dec', obs_bins = None, truth_bins = None, obsTag = 'mag_auto', truthTag = 'mag'):
if healConfig is None:
healConfig = getHealConfig()
truth = HealPixifyCatalogs(catalog=truth, healConfig=healConfig, ratag=ratag, dectag = dectag)
sim = HealPixifyCatalogs(catalog=sim, healConfig=healConfig, ratag=ratag, dectag = dectag)
truthMatched = HealPixifyCatalogs(catalog=truthMatched, healConfig=healConfig, ratag=ratag, dectag = dectag)
useInds = np.unique(sim['HEALIndex'])
if obs_bins is None:
obs_bins = chooseBins(catalog=sim, tag = obsTag, binsize=0.5,upperLimit=24.5,lowerLimit=15.)
if truth_bins is None:
truth_bins = chooseBins(catalog = truthMatched, tag = truthTag, binsize = 0.5,upperLimit=26.,lowerLimit=15)
truth_bin_centers = (truth_bins[0:-1] + truth_bins[1:])/2.
obs_bin_centers = (obs_bins[0:-1] + obs_bins[1:])/2.
Lensemble = np.empty( (obs_bins.size-1 , truth_bins.size-1, useInds.size) )
if doplot is True:
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LogNorm
pp = PdfPages('likelihoods.pdf')
fig,ax = plt.subplots(figsize=(6.,6.))
# Make a plot of the likelihood of the whole region.
masterLikelihood = makeLikelihoodMatrix( sim=sim, truth=truth, truthMatched = truthMatched, Lcut = 0.,
obs_bins = obs_bins, truth_bins = truth_bins, simTag = obsTag, truthTag = truthTag)
im = ax.imshow(masterLikelihood, origin='lower',cmap=plt.cm.Greys, norm = LogNorm(vmin=1e-6,vmax=1),
extent = [truth_bin_centers[0],truth_bin_centers[-1],obs_bin_centers[0],obs_bin_centers[-1]])
ax.set_xlabel('truth mag.')
ax.set_ylabel('measured mag.')
ax.set_title('full area likelihood')
fig.colorbar(im,ax=ax)
pp.savefig(fig)
for hpIndex,i in zip(useInds,xrange(useInds.size)):
thisSim = sim[sim['HEALIndex'] == hpIndex]
thisTruth = truth[truth['HEALIndex'] == hpIndex]
thisTruthMatched = truthMatched[sim['HEALIndex'] == hpIndex]
if thisTruth.size > 100:
thisLikelihood = makeLikelihoodMatrix( sim=thisSim, truth=thisTruth, truthMatched = thisTruthMatched, Lcut = 1.e-3,
obs_bins = obs_bins, truth_bins = truth_bins, simTag = obsTag, truthTag = truthTag)
Lensemble[:,:,i] = thisLikelihood
if doplot is True:
fig,ax = plt.subplots(figsize = (6.,6.))
im = ax.imshow(thisLikelihood, origin='lower',cmap=plt.cm.Greys, norm = LogNorm(vmin=1e-6,vmax=1),
extent = [truth_bin_centers[0],truth_bin_centers[-1],obs_bin_centers[0],obs_bin_centers[-1]])
ax.set_xlabel('truth mag.')
ax.set_ylabel('measured mag.')
ax.set_title('nside= '+str(healConfig['map_nside'])+', HEALPixel= '+str(hpIndex) )
fig.colorbar(im,ax=ax)
pp.savefig(fig)
if doplot is True:
pp.close()
if getBins is False:
return Lensemble, useInds, masterLikelihood, truth_bin_centers, obs_bin_centers
if getBins is True:
return Lensemble, useInds, masterLikelihood, truth_bins, obs_bins
def getGoodRegionIndices(catalog=None, badHPInds=None, nside=4096):
hpInd = hpRaDecToHEALPixel(catalog['ra'], catalog['dec'], nside=nside, nest= True)
keep = ~np.in1d(hpInd, badHPInds)
return keep
def excludeBadRegions(des,balrogObs, balrogTruthMatched, balrogTruth, band='i'):
eliMap = hp.read_map("sva1_gold_1.0.4_goodregions_04_equ_nest_4096.fits", nest=True)
nside = hp.npix2nside(eliMap.size)
maskIndices = np.arange(eliMap.size)
badIndices = maskIndices[eliMap == 1]
obsKeepIndices = getGoodRegionIndices(catalog=balrogObs, badHPInds=badIndices, nside=nside)
truthKeepIndices = getGoodRegionIndices(catalog=balrogTruth, badHPInds=badIndices, nside=nside)
desKeepIndices = getGoodRegionIndices(catalog=des, badHPInds=badIndices, nside=nside)
balrogObs = balrogObs[obsKeepIndices]
balrogTruthMatched = balrogTruthMatched[obsKeepIndices]
balrogTruth = balrogTruth[truthKeepIndices]
des = des[desKeepIndices]
return des,balrogObs, balrogTruthMatched, balrogTruth
def likelihoodPCA(likelihood= None, likelihood_master = None, doplot = False, band = None,
extent = None):
# This does a simple PCA on the array of likelihood matrices to find a compact basis with which to represent the likelihood.
origShape = np.shape(likelihood)
likelihood_1d = np.reshape(likelihood, (origShape[0]*origShape[1], origShape[2]))
L1d_master = np.reshape(likelihood_master, origShape[0]*origShape[1])
# Subtract L1d_master from each row of L1d:
#for i in xrange(origShape[2]):
# likelihood_1d[:,i] = likelihood_1d[:,i] - L1d_master
L1d = likelihood_1d.T
U,s,Vt = np.linalg.svd(L1d,full_matrices=False)
V = Vt.T
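    # Each row of L1d is one HEALPixel's flattened likelihood matrix; the SVD
    # L1d = U*s*Vt therefore yields, in the columns of V, a set of orthogonal
    # "eigen-likelihood" images ordered by singular value.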
    ind = np.argsort(s)[::-1]
U = U[:, ind]
s = s[ind]
V = V[:, ind]
#for i in xrange(origShape[2]):
# likelihood_1d[:,i] = likelihood_1d[:,i] + L1d_master
likelihood_pcomp = V.reshape(origShape)
if doplot is True:
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LogNorm, Normalize
if band is None:
            raise ValueError("Must supply band (g,r,i,z,Y) in order to save PCA plots.")
pp = PdfPages('likelihood_pca_components-'+band+'.pdf')
for i,thing in zip(xrange(s.size),s):
fig,ax = plt.subplots(nrows=1,ncols=1,figsize = (6.,6.))
im = ax.imshow( -likelihood_pcomp[:,:,i],origin='lower',cmap=plt.cm.Greys, extent = extent,vmin=-1,vmax=1)
ax.set_xlabel(band+' mag (true)')
ax.set_ylabel(band+' mag (meas)')
fig.colorbar(im,ax=ax)
pp.savefig(fig)
fig,ax = plt.subplots(1,1,figsize = (6.,6.) )
ax.plot(np.abs(s))
ax.set_yscale('log')
ax.set_xlabel('rank')
ax.set_ylabel('eigenvalue')
pp.savefig(fig)
pp.close()
return likelihood_pcomp, s
def doLikelihoodPCAfit(pcaComp = None, master = None, eigenval = None, likelihood =None, n_component = 5, Lcut = 0.):
# Perform least-squares: Find the best combination of master + pcaComps[:,:,0:n_component] that fits likelihood
origShape = likelihood.shape
    # The master-subtraction variant is disabled (see the commented-out lines
    # below); the likelihood is simply flattened for the least-squares fit.
    L1d = likelihood.reshape(likelihood.size)
pca1d = pcaComp.reshape( ( likelihood.size, pcaComp.shape[-1]) )
pcafit = pca1d[:,0:(n_component)]
m1d = np.reshape(master,master.size)
#allfit = np.hstack((m1d[:,None], pcafit) )
allfit = pcafit
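    # Least-squares projection: find the coefficients minimizing
    # || allfit.dot(coeff) - L1d ||^2 over the first n_component eigen-likelihoods.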
coeff, resid, _, _ = np.linalg.lstsq(allfit, L1d)
bestFit = np.dot(allfit,coeff)
bestFit2d = bestFit.reshape(likelihood.shape)
#bestFit2d = bestFit2d + master
bestFit2d[bestFit2d < Lcut] = 0.
m_coeff, m_resid, _, _ = np.linalg.lstsq(allfit, m1d)
m1d_fit = np.dot(allfit, m_coeff)
m2d = np.reshape(m1d_fit,master.shape)
return bestFit2d, m2d
def main(argv):
parser = argparse.ArgumentParser(description = 'Perform magnitude distribution inference on DES data.')
parser.add_argument('filter',help='filter name',choices=['g','r','i','z','Y'])
parser.add_argument("-r","--reload",help='reload catalogs from DESDB', action="store_true")
args = parser.parse_args(argv[1:])
band = args.filter
print "performing inference in band: "+args.filter
print "Reloading from DESDM:", args.reload
des, sim, truthMatched, truth, tileInfo = getCatalogs(reload = args.reload, band = args.filter)
print "Excluding regions Eli says are bad."
des, sim, truthMatched, truth = excludeBadRegions(des,sim, truthMatched, truth,band=band)
print sim.size
# Filter out bad detections, if we can.
truth = truth[truth['mag'] > 0]
keep = (truthMatched['mag'] > 0)
sim = sim[keep]
truthMatched = truthMatched[keep]
HEALConfig = getHealConfig(map_nside = 64)
print "Getting likelihood matrices for each HEALPixel"
Likelihoods, HEALPixels, masterLikelihood, truth_bin_centers, obs_bin_centers = getAllLikelihoods(truth=truth, sim=sim,
truthMatched = truthMatched,
healConfig=HEALConfig ,doplot = True)
# Solve for the pca components.
print "Performing PCA fit over all the likelihoods."
Lpca, pcaEigen = likelihoodPCA(likelihood = Likelihoods, likelihood_master = masterLikelihood, doplot = True, band = band,
extent = [truth_bin_centers[0],truth_bin_centers[-1],obs_bin_centers[0],obs_bin_centers[-1]])
# Loop over the likelihoods again. Find the best low-n PCA fit to each.
L_fit = Likelihoods * 0.
# And make a plot showing the likelihood, the best fit, and the residual map.
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LogNorm, Normalize
pp = PdfPages('likelihood_pca_fit-'+band+'.pdf')
print "Making plot of all the likelihoods and their best fits"
for i in xrange(HEALPixels.size):
thisLike = Likelihoods[:,:,i]
L_fit[:,:,i], master_pca = doLikelihoodPCAfit( pcaComp = Lpca, master = masterLikelihood, Lcut = 1.e-3,
eigenval = pcaEigen, likelihood = thisLike, n_component = 4)
fig,ax = plt.subplots(nrows=1,ncols=4,sharey=True,figsize = (20.,5.))
im0 = ax[0].imshow(master_pca, origin='lower',cmap=plt.cm.Greys, norm = LogNorm(vmin=1e-6,vmax=1),
extent = [truth_bin_centers[0],truth_bin_centers[-1],obs_bin_centers[0],obs_bin_centers[-1]])
ax[0].set_xlabel('truth '+band+' mag.')
ax[0].set_ylabel('measured '+band+' mag.')
ax[0].set_title('Full SVA1 Balrog area')
im1 = ax[1].imshow(Likelihoods[:,:,i], origin='lower',cmap=plt.cm.Greys, norm = LogNorm(vmin=1e-6,vmax=1),
extent = [truth_bin_centers[0],truth_bin_centers[-1],obs_bin_centers[0],obs_bin_centers[-1]])
ax[1].set_xlabel('truth '+band+' mag.')
ax[1].set_title('Balrog likelihood, \n HEALPixel='+str(HEALPixels[i])+', nside='+str(HEALConfig['out_nside']))
im2 = ax[2].imshow(L_fit[:,:,i], origin='lower',cmap=plt.cm.Greys, norm = LogNorm(vmin=1e-6,vmax=1),
extent = [truth_bin_centers[0],truth_bin_centers[-1],obs_bin_centers[0],obs_bin_centers[-1]])
ax[2].set_xlabel('truth '+band+' mag.')
ax[2].set_title('PCA-smoothed Balrog likelihood')
im3 = ax[3].imshow(Likelihoods[:,:,i] - L_fit[:,:,i], origin='lower', cmap=plt.cm.Greys, norm = Normalize(vmin=-1,vmax = 1),
extent = [truth_bin_centers[0],truth_bin_centers[-1],obs_bin_centers[0],obs_bin_centers[-1]])
ax[3].set_xlabel('truth '+band+' mag.')
ax[3].set_title('residuals')
fig.colorbar(im2,ax=ax[2])
fig.colorbar(im3, ax = ax[3])
pp.savefig(fig)
pp.close()
import esutil
print "Writing likelihoods to file masterLikelihoodFile.fits"
esutil.io.write('masterLikelihoodFile.fits',Likelihoods, ext=1)
esutil.io.write('masterLikelihoodFile.fits',L_fit,ext=2)
esutil.io.write('masterLikelihoodFile.fits',HEALPixels, ext=3)
print "Done."
if __name__ == "__main__":
import pdb, traceback
try:
main(sys.argv)
except:
thingtype, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
| mit |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_brainstorm_auditory.py | 5 | 17033 | # -*- coding: utf-8 -*-
"""
====================================
Brainstorm auditory tutorial dataset
====================================
Here we compute the evoked from raw for the auditory Brainstorm
tutorial dataset. For comparison, see [1]_ and:
http://neuroimage.usc.edu/brainstorm/Tutorials/Auditory
Experiment:
- One subject, 2 acquisition runs 6 minutes each.
- Each run contains 200 regular beeps and 40 easy deviant beeps.
- Random ISI: between 0.7 s and 1.7 s, uniformly distributed.
- Button pressed when detecting a deviant with the right index finger.
The specifications of this dataset were discussed initially on the
`FieldTrip bug tracker <http://bugzilla.fcdonders.nl/show_bug.cgi?id=2300>`_.
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID
879716, 13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Mainak Jas <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import pandas as pd
import numpy as np
import mne
from mne import combine_evoked
from mne.minimum_norm import apply_inverse
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
from mne.filter import notch_filter, low_pass_filter
print(__doc__)
###############################################################################
# To reduce memory consumption and running time, some of the steps are
# precomputed. To run everything from scratch change this to False. With
# ``use_precomputed = False`` running time of this script can be several
# minutes even on a fast computer.
use_precomputed = True
###############################################################################
# The data was collected with a CTF 275 system at 2400 Hz and low-pass
# filtered at 600 Hz. Here the data and empty room data files are read to
# construct instances of :class:`mne.io.Raw`.
data_path = bst_auditory.data_path()
subject = 'bst_auditory'
subjects_dir = op.join(data_path, 'subjects')
raw_fname1 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_01.ds')
raw_fname2 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_02.ds')
erm_fname = op.join(data_path, 'MEG', 'bst_auditory',
'S01_Noise_20131218_01.ds')
###############################################################################
# In the memory-saving mode we use ``preload=False`` and the memory-efficient
# IO, which loads the data on demand. However, filtering and some
# other functions require the data to be preloaded in memory.
preload = not use_precomputed
raw = read_raw_ctf(raw_fname1, preload=preload)
n_times_run1 = raw.n_times
mne.io.concatenate_raws([raw, read_raw_ctf(raw_fname2, preload=preload)])
raw_erm = read_raw_ctf(erm_fname, preload=preload)
###############################################################################
# The data channel array consisted of 274 MEG axial gradiometers, 26 MEG reference
# sensors and 2 EEG electrodes (Cz and Pz).
# In addition:
#
# - 1 stim channel for marking presentation times for the stimuli
# - 1 audio channel for the sent signal
# - 1 response channel for recording the button presses
# - 1 ECG bipolar
# - 2 EOG bipolar (vertical and horizontal)
# - 12 head tracking channels
# - 20 unused channels
#
# The head tracking channels and the unused channels are marked as misc
# channels. Here we define the EOG and ECG channels.
raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})
if not use_precomputed:
# Leave out the two EEG channels for easier computation of forward.
raw.pick_types(meg=True, eeg=False, stim=True, misc=True, eog=True,
ecg=True)
###############################################################################
# For noise reduction, a set of bad segments have been identified and stored
# in csv files. The bad segments are later used to reject epochs that overlap
# with them.
# The file for the second run also contains some saccades. The saccades are
# removed by using SSP. We use pandas to read the data from the csv files. You
# can also view the files with your favorite text editor.
annotations_df = pd.DataFrame()
offset = n_times_run1
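# Onsets from the second run are shifted by the length of run 1 so that the
# annotations line up with the concatenated recording.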
for idx in [1, 2]:
csv_fname = op.join(data_path, 'MEG', 'bst_auditory',
'events_bad_0%s.csv' % idx)
df = pd.read_csv(csv_fname, header=None,
names=['onset', 'duration', 'id', 'label'])
print('Events from run {0}:'.format(idx))
print(df)
df['onset'] += offset * (idx - 1)
annotations_df = pd.concat([annotations_df, df], axis=0)
saccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)
# Conversion from samples to times:
onsets = annotations_df['onset'].values / raw.info['sfreq']
durations = annotations_df['duration'].values / raw.info['sfreq']
descriptions = map(str, annotations_df['label'].values)
annotations = mne.Annotations(onsets, durations, descriptions)
raw.annotations = annotations
del onsets, durations, descriptions
###############################################################################
# Here we compute the saccade and EOG projectors for magnetometers and add
# them to the raw data. The projectors are added to both runs.
saccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,
reject_by_annotation=False)
projs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,
desc_prefix='saccade')
if use_precomputed:
proj_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-eog-proj.fif')
projs_eog = mne.read_proj(proj_fname)[0]
else:
projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),
n_mag=1, n_eeg=0)
raw.add_proj(projs_saccade)
raw.add_proj(projs_eog)
del saccade_epochs, saccades_events, projs_eog, projs_saccade # To save memory
###############################################################################
# Visually inspect the effects of projections. Click on 'proj' button at the
# bottom right corner to toggle the projectors on/off. EOG events can be
# plotted by adding the event list as a keyword argument. As the bad segments
# and saccades were added as annotations to the raw data, they are plotted as
# well.
raw.plot(block=True)
###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and its harmonics. The power spectra are plotted
# before and after the filtering to show the effect. The drop after 600 Hz
# appears because the data was filtered during the acquisition. In memory
# saving mode we do the filtering at evoked stage, which is not something you
# usually would do.
if not use_precomputed:
meg_picks = mne.pick_types(raw.info, meg=True, eeg=False)
raw.plot_psd(picks=meg_picks)
notches = np.arange(60, 181, 60)
raw.notch_filter(notches)
raw.plot_psd(picks=meg_picks)
###############################################################################
# We also low-pass filter the data at 100 Hz to remove the high-frequency components.
if not use_precomputed:
raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',
phase='zero-double')
###############################################################################
# Epoching and averaging.
# First some parameters are defined and events extracted from the stimulus
# channel (UPPT001). The rejection thresholds are defined as peak-to-peak
# values and are in T / m for gradiometers, T for magnetometers and
# V for EOG and EEG channels.
tmin, tmax = -0.1, 0.5
event_id = dict(standard=1, deviant=2)
reject = dict(mag=4e-12, eog=250e-6)
# find events
events = mne.find_events(raw, stim_channel='UPPT001')
###############################################################################
# The event timing is adjusted by comparing the trigger times on detected
# sound onsets on channel UADC001-4408.
sound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]
onsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]
min_diff = int(0.5 * raw.info['sfreq'])
diffs = np.concatenate([[min_diff + 1], np.diff(onsets)])
onsets = onsets[diffs > min_diff]
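# Keep only onsets separated by at least 0.5 s so that each stimulus is counted
# only once (the threshold crossing can repeat within a single beep).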
assert len(onsets) == len(events)
diffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']
print('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'
% (np.mean(diffs), np.std(diffs)))
events[:, 0] = onsets
del sound_data, diffs
###############################################################################
# We mark a set of bad channels that seem noisier than others. This can also
# be done interactively with ``raw.plot`` by clicking the channel name
# (or the line). The marked channels are added as bad when the browser window
# is closed.
raw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']
###############################################################################
# The epochs (trials) are created for MEG channels. First we find the picks
# for MEG and EOG channels. Then the epochs are constructed using these picks.
# The epochs overlapping with annotated bad segments are also rejected by
# default. To turn off rejection by bad segments (as was done earlier with
# saccades) you can use keyword ``reject_by_annotation=False``.
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=False,
proj=True)
###############################################################################
# We only use the first 40 good epochs from each run. Since we first drop the
# bad epochs, the indices of the epochs are no longer the same as in the
# original epochs collection. Investigation of the event timings reveals that
# the first epoch from the second run corresponds to index 182.
epochs.drop_bad()
epochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],
epochs['standard'][182:222]])
epochs_standard.load_data() # Resampling to save memory.
epochs_standard.resample(600, npad='auto')
epochs_deviant = epochs['deviant'].load_data()
epochs_deviant.resample(600, npad='auto')
del epochs, picks
###############################################################################
# The averages for each conditions are computed.
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant
###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and its harmonics. Normally this would be done to
# raw data (with :func:`mne.io.Raw.filter`), but to reduce memory consumption
# of this tutorial, we do it at evoked stage.
if use_precomputed:
sfreq = evoked_std.info['sfreq']
nchan = evoked_std.info['nchan']
notches = [60, 120, 180]
for ch_idx in range(nchan):
evoked_std.data[ch_idx] = notch_filter(evoked_std.data[ch_idx], sfreq,
notches, verbose='ERROR')
evoked_dev.data[ch_idx] = notch_filter(evoked_dev.data[ch_idx], sfreq,
notches, verbose='ERROR')
evoked_std.data[ch_idx] = low_pass_filter(evoked_std.data[ch_idx],
sfreq, 100, verbose='ERROR')
evoked_dev.data[ch_idx] = low_pass_filter(evoked_dev.data[ch_idx],
sfreq, 100, verbose='ERROR')
###############################################################################
# Here we plot the ERF of standard and deviant conditions. In both conditions
# we can see the P50 and N100 responses. The mismatch negativity is visible
# only in the deviant condition around 100-200 ms. P200 is also visible around
# 170 ms in both conditions but much stronger in the standard condition. P300
# is visible in the deviant condition only (decision making in preparation of
# the button press). You can view the topographies for a certain time span by
# painting an area while clicking and holding the left mouse button.
evoked_std.plot(window_title='Standard', gfp=True)
evoked_dev.plot(window_title='Deviant', gfp=True)
###############################################################################
# Show activations as topography figures.
times = np.arange(0.05, 0.301, 0.025)
evoked_std.plot_topomap(times=times, title='Standard')
evoked_dev.plot_topomap(times=times, title='Deviant')
###############################################################################
# We can see the MMN effect more clearly by looking at the difference between
# the two conditions. P50 and N100 are no longer visible, but MMN/P200 and
# P300 are emphasised.
evoked_difference = combine_evoked([evoked_dev, -evoked_std], weights='equal')
evoked_difference.plot(window_title='Difference', gfp=True)
###############################################################################
# Source estimation.
# We compute the noise covariance matrix from the empty room measurement
# and use it for the other runs.
reject = dict(mag=4e-12)
cov = mne.compute_raw_covariance(raw_erm, reject=reject)
cov.plot(raw_erm.info)
del raw_erm
###############################################################################
# The transformation is read from a file. For more information about
# coregistering the data, see :ref:`ch_interactive_analysis` or
# :func:`mne.gui.coregistration`.
trans_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-trans.fif')
trans = mne.read_trans(trans_fname)
###############################################################################
# To save time and memory, the forward solution is read from a file. Set
# ``use_precomputed=False`` in the beginning of this script to build the
# forward solution from scratch. The head surfaces for constructing a BEM
# solution are read from a file. Since the data only contains MEG channels, we
# only need the inner skull surface for making the forward solution. For more
# information: :ref:`CHDBBCEJ`, :func:`mne.setup_source_space`,
# :ref:`create_bem_model`, :func:`mne.bem.make_watershed_bem`.
if use_precomputed:
fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-meg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
else:
src = mne.setup_source_space(subject, spacing='ico4',
subjects_dir=subjects_dir, overwrite=True)
model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,
bem=bem)
inv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)
snr = 3.0
lambda2 = 1.0 / snr ** 2
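# Regularization parameter for the inverse solution, lambda2 = 1 / SNR**2
# (the standard choice in the MNE examples).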
del fwd
###############################################################################
# The sources are computed using dSPM method and plotted on an inflated brain
# surface. For interactive controls over the image, use keyword
# ``time_viewer=True``.
# Standard condition.
stc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')
brain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_standard, brain
###############################################################################
# Deviant condition.
stc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')
brain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_deviant, brain
###############################################################################
# Difference.
stc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')
brain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.15, time_unit='s')
| bsd-3-clause |
tomacorp/thermapythia | thermpy/interactivePlot.py | 1 | 3654 | import matplotlib.pyplot as plt
import Matls
import Layers
class interactivePlot:
def __init__(self, lyr, mesh):
self.lyr = lyr
self.mesh = mesh
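    # Assumed layout (inferred from usage below): `lyr` holds named indices into
    # the third axis of `mesh.field`, while `mesh` provides the xr/yr grid
    # coordinates used by pcolormesh.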
def plotSolution(self):
"""
    plotSolution(self)
Plot the problem grids and also the solution grid.
"""
self.plotResistance()
self.plotTemperature()
self.plotDirichlet()
self.plotHeatSources()
self.plotSpicedeg()
self.plotIsotherm()
self.show()
def show(self):
plt.show()
def plotIsotherm(self):
"""
Make a plot that shows which nodes have Dirichlet boundary conditions
attached to them through a resistor
"""
z5= self.mesh.ifield[:, :, self.lyr.isoflag];
plt.figure(5)
plt.subplot(1,1,1)
plt.axes(aspect=1)
quad4= plt.pcolormesh(self.mesh.xr, self.mesh.yr, z5)
plt.colorbar()
plt.title('Nodes with Dirichlet boundary conditions map')
plt.draw()
def plotHeatSources(self):
"""
Make a plot that shows which nodes have heat sources attached.
"""
z4= self.mesh.field[:, :, self.lyr.heat];
plt.figure(4)
plt.subplot(1,1,1)
plt.axes(aspect=1)
quad4= plt.pcolormesh(self.mesh.xr, self.mesh.yr, z4)
plt.colorbar()
plt.title('Heat sources map')
plt.draw()
def plotDirichlet(self):
"""
Make a plot that shows the relative temperature of the Dirichlet
boundary condition nodes.
"""
z3= self.mesh.field[:, :, self.lyr.isodeg];
plt.figure(3)
plt.subplot(1,1,1)
plt.axes(aspect=1)
quad3= plt.pcolormesh(self.mesh.xr, self.mesh.yr, z3)
plt.colorbar()
plt.title('Dirichlet boundary conditions temperature map')
plt.draw()
def plotTemperature(self):
"""
Make a plot that shows the temperature of the mesh nodes.
"""
plt.figure(2)
z2= self.mesh.field[:, :, self.lyr.deg];
plt.subplot(1,1,1)
plt.axes(aspect=1)
quad2= plt.pcolormesh(self.mesh.xr, self.mesh.yr, z2)
plt.colorbar()
plt.title('AztecOO heat map')
plt.draw()
def plotResistance(self):
"""
Make a plot that shows the thermal resistance of the materials in the mesh nodes.
"""
z1= self.mesh.field[:, :, self.lyr.resis];
plt.figure(1)
plt.subplot(1,1,1)
plt.axes(aspect=1)
quad1= plt.pcolormesh(self.mesh.xr, self.mesh.yr, z1)
plt.colorbar()
plt.title('Thermal resistance map')
plt.draw()
def plotSpicedeg(self):
"""
Make a plot that shows the temperature of the mesh nodes as simulated by Xyce.
"""
z1= self.mesh.field[:, :, self.lyr.spicedeg];
plt.figure(1)
plt.subplot(1,1,1)
plt.axes(aspect=1)
quad1= plt.pcolormesh(self.mesh.xr, self.mesh.yr, z1)
plt.colorbar()
plt.title('Spice heat map')
plt.draw()
def plotLayerDifference(self, layer1, layer2):
"""
Make a plot that shows the difference between two values in the mesh.
"""
z1= self.mesh.field[:, :, layer1];
z2= self.mesh.field[:, :, layer2];
z3= z1 - z2
plt.figure(1)
plt.subplot(1,1,1)
plt.axes(aspect=1)
quad1= plt.pcolormesh(self.mesh.xr, self.mesh.yr, z3)
plt.colorbar()
plt.title('Difference heat map')
plt.draw()
def main():
print "This is a test program for interactivePlot. It draws a graph on the screen."
import Layers
import Mesh2D
lyr = Layers.Layers()
matls = Matls.Matls()
mesh = Mesh2D.Mesh(3, 3, lyr, matls)
for x in range(0,3):
for y in range(0,3):
mesh.field[x, y, lyr.spicedeg] = (x+1) * ((y+1) + 1)
plots= interactivePlot(lyr, mesh)
plots.plotSpicedeg()
plots.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
freedomtan/tensorflow | tensorflow/python/kernel_tests/constant_op_eager_test.py | 6 | 22281 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
# TODO(ashankar): Collapse with tests in constant_op_test.py and use something
# like the test_util.run_in_graph_and_eager_modes decorator to confirm
# equivalence between graph and eager execution.
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with context.device("/device:CPU:0"):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
device = test_util.gpu_device_name()
if device:
np_ans = np.array(x)
with context.device(device):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
orig = [-1.0, 2.0, 0.0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints
orig = [-1.5, 2, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints that don't fit in int32
orig = [1, 2**42, 0.5]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig, dtypes_lib.float64)
self.assertEqual(dtypes_lib.float64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# This integer is not exactly representable as a double, gets rounded.
tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64)
self.assertEqual(2**54, tf_ans.numpy())
# This integer is larger than all non-infinite numbers representable
# by a double, raises an exception.
with self.assertRaisesRegex(ValueError, "out-of-range integer"):
constant_op.constant(10**310, dtypes_lib.float64)
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
self._testAll([-1, 2])
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
# Should detect out of range for int32 and use int64 instead.
orig = [2, 2**48, -2**48]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.int64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Out of range for an int64
with self.assertRaisesRegex(ValueError, "out-of-range integer"):
constant_op.constant([2**72])
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5
]).astype(np.complex128))
self._testAll(
np.complex(1, 2) * np.random.normal(size=30).reshape(
[2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
@test_util.disable_tfrt("support creating string tensors from empty "
"numpy arrays.")
def testString(self):
val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)]
self._testCpu(np.array(val).reshape([2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
val = ops.convert_to_tensor(b"\0\0\0\0").numpy()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
val = ops.convert_to_tensor(b"xx\0xx").numpy()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
val = ops.convert_to_tensor(nested).numpy()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testStringConstantOp(self):
s = constant_op.constant("uiuc")
self.assertEqual(s.numpy().decode("utf-8"), "uiuc")
s_array = constant_op.constant(["mit", "stanford"])
self.assertAllEqual(s_array.numpy(), ["mit", "stanford"])
with ops.device("/cpu:0"):
s = constant_op.constant("cmu")
self.assertEqual(s.numpy().decode("utf-8"), "cmu")
s_array = constant_op.constant(["berkeley", "ucla"])
self.assertAllEqual(s_array.numpy(), ["berkeley", "ucla"])
def testExplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeFill(self):
c = constant_op.constant(12, shape=[7])
self.assertEqual(c.get_shape(), [7])
self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy())
def testExplicitShapeReshape(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[5, 2, 3])
self.assertEqual(c.get_shape(), [5, 2, 3])
def testImplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeTooBig(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testShapeTooSmall(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShapeWrong(self):
with self.assertRaisesRegex(TypeError, None):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShape(self):
self._testAll(constant_op.constant([1]).get_shape())
def testDimension(self):
x = constant_op.constant([1]).shape[0]
self._testAll(x)
def testDimensionList(self):
x = [constant_op.constant([1]).shape[0]]
self._testAll(x)
# Mixing with regular integers is fine too
self._testAll([1] + x)
self._testAll(x + [1])
def testDimensionTuple(self):
x = constant_op.constant([1]).shape[0]
self._testAll((x,))
self._testAll((1, x))
self._testAll((x, 1))
def testInvalidLength(self):
class BadList(list):
def __init__(self):
super(BadList, self).__init__([1, 2, 3]) # pylint: disable=invalid-length-returned
def __len__(self):
return -1
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([BadList()])
with self.assertRaisesRegex(ValueError, "mixed types"):
constant_op.constant([1, 2, BadList()])
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant(BadList())
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([[BadList(), 2], 3])
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([BadList(), [1, 2, 3]])
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([BadList(), []])
# TODO(allenl, josh11b): These cases should return exceptions rather than
# working (currently shape checking only checks the first element of each
# sequence recursively). Maybe the first one is fine, but the second one
# silently truncating is rather bad.
# with self.assertRaisesRegex(ValueError, "should return >= 0"):
# constant_op.constant([[3, 2, 1], BadList()])
# with self.assertRaisesRegex(ValueError, "should return >= 0"):
# constant_op.constant([[], BadList()])
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegex(ValueError, "non-rectangular Python sequence"):
constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegex(ValueError, None):
constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegex(ValueError, None):
constant_op.constant([[1, 2], [3], [4, 5]])
# TODO(ashankar): This test fails with graph construction since
# tensor_util.make_tensor_proto (invoked from constant_op.constant)
# does not handle iterables (it relies on numpy conversion).
# For consistency, should graph construction handle Python objects
# that implement the sequence protocol (but not numpy conversion),
# or should eager execution fail on such sequences?
def testCustomSequence(self):
# This is inspired by how many objects in pandas are implemented:
# - They implement the Python sequence protocol
# - But may raise a KeyError on __getitem__(self, 0)
# See https://github.com/tensorflow/tensorflow/issues/20347
class MySeq(object):
def __getitem__(self, key):
if key != 1 and key != 3:
raise KeyError(key)
return key
def __len__(self):
return 2
def __iter__(self):
l = list([1, 3])
return l.__iter__()
self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq())))
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.EagerTensor))
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
    # np.object (and can't be changed without breaking a lot of things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
z_value = z_var.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.")
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=False)
@test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.")
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.bool, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=True)
@test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.")
def testZerosLikeDtype(self):
    # Make sure zeros_like works even for dtypes that cannot be cast between each other.
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).numpy()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
def _Ones(self, shape):
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
      # Constructs a tensor of ones of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.numpy()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
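    # Runs fill on the GPU only when requested and one is available, then
    # compares the result against the NumPy reference.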
ctx = context.context()
device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
with ops.device(device):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.numpy()
self.assertAllClose(np_ans, out)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill(shape, 7)
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([3, 2], [1.0, 2.0])
if __name__ == "__main__":
test.main()
| apache-2.0 |
rs2/pandas | pandas/tests/frame/test_repr_info.py | 2 | 6729 | from datetime import datetime, timedelta
from io import StringIO
import warnings
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
Series,
date_range,
option_context,
period_range,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
class TestDataFrameReprInfoEtc:
def test_repr_empty(self):
# empty
repr(DataFrame())
# empty with index
frame = DataFrame(index=np.arange(1000))
repr(frame)
def test_repr_mixed(self, float_string_frame):
buf = StringIO()
# mixed
repr(float_string_frame)
float_string_frame.info(verbose=False, buf=buf)
@pytest.mark.slow
def test_repr_mixed_big(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)}, index=range(200)
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
repr(biggie)
def test_repr(self, float_frame):
buf = StringIO()
# small one
repr(float_frame)
float_frame.info(verbose=False, buf=buf)
# even smaller
float_frame.reindex(columns=["A"]).info(verbose=False, buf=buf)
float_frame.reindex(columns=["A", "B"]).info(verbose=False, buf=buf)
# exhausting cases in DataFrame.info
# columns but no index
no_index = DataFrame(columns=[0, 1, 3])
repr(no_index)
# no columns or index
DataFrame().info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
assert "\t" not in repr(df)
assert "\r" not in repr(df)
assert "a\n" not in repr(df)
def test_repr_dimensions(self):
df = DataFrame([[1, 2], [3, 4]])
with option_context("display.show_dimensions", True):
assert "2 rows x 2 columns" in repr(df)
with option_context("display.show_dimensions", False):
assert "2 rows x 2 columns" not in repr(df)
with option_context("display.show_dimensions", "truncate"):
assert "2 rows x 2 columns" not in repr(df)
@pytest.mark.slow
def test_repr_big(self):
# big one
biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200))
repr(biggie)
def test_repr_unsortable(self, float_frame):
# columns are not sortable
warn_filters = warnings.filters
warnings.filterwarnings("ignore", category=FutureWarning, module=".*format")
unsortable = DataFrame(
{
"foo": [1] * 50,
datetime.today(): [1] * 50,
"bar": ["bar"] * 50,
datetime.today() + timedelta(1): ["bar"] * 50,
},
index=np.arange(50),
)
repr(unsortable)
fmt.set_option("display.precision", 3, "display.column_space", 10)
repr(float_frame)
fmt.set_option("display.max_rows", 10, "display.max_columns", 2)
repr(float_frame)
fmt.set_option("display.max_rows", 1000, "display.max_columns", 1000)
repr(float_frame)
tm.reset_display_options()
warnings.filters = warn_filters
def test_repr_unicode(self):
uval = "\u03c3\u03c3\u03c3\u03c3"
df = DataFrame({"A": [uval, uval]})
result = repr(df)
ex_top = " A"
assert result.split("\n")[0].rstrip() == ex_top
df = DataFrame({"A": [uval, uval]})
result = repr(df)
assert result.split("\n")[0].rstrip() == ex_top
def test_unicode_string_with_unicode(self):
df = DataFrame({"A": ["\u05d0"]})
str(df)
def test_repr_unicode_columns(self):
df = DataFrame({"\u05d0": [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_str_to_bytes_raises(self):
# GH 26447
df = DataFrame({"A": ["abc"]})
msg = "^'str' object cannot be interpreted as an integer$"
with pytest.raises(TypeError, match=msg):
bytes(df)
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20), columns=tm.rands_array(10, 20))
repr(df)
def test_repr_column_name_unicode_truncation_bug(self):
# #1906
df = DataFrame(
{
"Id": [7117434],
"StringCol": (
"Is it possible to modify drop plot code"
"so that the output graph is displayed "
"in iphone simulator, Is it possible to "
"modify drop plot code so that the "
"output graph is \xe2\x80\xa8displayed "
"in iphone simulator.Now we are adding "
"the CSV file externally. I want to Call "
"the File through the code.."
),
}
)
with option_context("display.max_columns", 20):
assert "StringCol" in repr(df)
def test_latex_repr(self):
result = r"""\begin{tabular}{llll}
\toprule
{} & 0 & 1 & 2 \\
\midrule
0 & $\alpha$ & b & c \\
1 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
with option_context("display.latex.escape", False, "display.latex.repr", True):
df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]])
assert result == df._repr_latex_()
# GH 12182
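        # once the option_context above exits, display.latex.repr reverts to its
        # default of False, so _repr_latex_ returns None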
assert df._repr_latex_() is None
def test_repr_categorical_dates_periods(self):
# normal DataFrame
dt = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
p = period_range("2011-01", freq="M", periods=5)
df = DataFrame({"dt": dt, "p": p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
assert repr(df) == exp
df2 = DataFrame({"dt": Categorical(dt), "p": Categorical(p)})
assert repr(df2) == exp
@pytest.mark.parametrize("arg", [np.datetime64, np.timedelta64])
@pytest.mark.parametrize(
"box, expected",
[[Series, "0 NaT\ndtype: object"], [DataFrame, " 0\n0 NaT"]],
)
def test_repr_np_nat_with_object(self, arg, box, expected):
# GH 25445
result = repr(box([arg("NaT")], dtype=object))
assert result == expected
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
| bsd-3-clause |
wlamond/scikit-learn | sklearn/svm/tests/test_svm.py | 33 | 35916 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings, assert_raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import six
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_true(hasattr(clf, "coef_") == (k == 'linear'))
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [-1, -1, -1])
assert_equal(pred.dtype, np.dtype('intp'))
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(AttributeError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError on clf.fit
if six.PY2:
# Test unicode (same as str on python3)
clf = svm.SVC(kernel=unicode('linear'))
clf.fit(X, Y)
# Test ascii bytes (str is bytes in python2)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
else:
# Test unicode (str is unicode in python3)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
# Test ascii bytes (same as str on python2)
clf = svm.SVC(kernel=bytes('linear', 'ascii'))
clf.fit(X, Y)
# Test default behavior on both versions
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
    # Check that upper-case loss/penalty notation raises an error at _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
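    # (liblinear appends a synthetic constant feature equal to intercept_scaling,
    # so the intercept is regularized like an ordinary coefficient; increasing
    # intercept_scaling lessens that effect)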
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions.
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
def test_ovr_decision_function():
# One point from each quadrant represents one class
X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
y_train = [0, 1, 2, 3]
# First point is closer to the decision boundaries than the second point
base_points = np.array([[5, 5], [10, 10]])
# For all the quadrants (classes)
X_test = np.vstack((
base_points * [1, 1], # Q1
base_points * [-1, 1], # Q2
base_points * [-1, -1], # Q3
base_points * [1, -1] # Q4
))
y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Test if the prediction is the same as y
assert_array_equal(y_pred, y_test)
deci_val = clf.decision_function(X_test)
# Assert that the predicted class has the maximum value
assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
# Get decision value at test points for the predicted class
pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
# Assert pred_class_deci_val > 0 here
assert_greater(np.min(pred_class_deci_val), 0.0)
# Test if the first point has lower decision value on every quadrant
# compared to the second point
assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))
| bsd-3-clause |
ckinzthompson/biasd | biasd/gui/plotter.py | 1 | 2859 | # -*- coding: utf-8 -*-
'''
PyQt trace plotter widget
'''
from PyQt5.QtWidgets import QWidget,QSizePolicy
# Make sure that we are using QT5
import matplotlib
matplotlib.use('Qt5Agg')
import numpy as np
from matplotlib.backends.backend_qt5agg import FigureCanvas
import matplotlib.pyplot as plt
class trace_plotter(FigureCanvas):
def __init__(self,parent=None):
self.f, self.a = plt.subplots(1,figsize=(8,4))
FigureCanvas.__init__(self,self.f)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.prep_axis()
plt.close()
def prep_axis(self):
self.a.spines['right'].set_visible(False)
self.a.spines['top'].set_visible(False)
self.a.yaxis.set_ticks_position('left')
self.a.xaxis.set_ticks_position('bottom')
self.a.set_xlabel('Time (s)')
self.a.set_ylabel('Signal')
self.line = self.a.plot([],[])[0]
self.plot_trace([0,1.],[0,1.])
self.a.set_title('-/0')
self.f.tight_layout()
self.line.set_xdata([])
self.line.set_ydata([])
self.draw()
def plot_trace(self,t,d,title=''):
if np.ndim(d) == 1:
if t is None:
t = np.arange(d.size)
self.line.set_xdata(t)
self.line.set_ydata(d)
self.a.set_xlim(np.min(t),np.max(t))
deltad = 0.1*(np.max(d)-np.min(d))
self.a.set_ylim(np.min(d) - deltad, np.max(d) + deltad)
self.line.set_color('black')
self.line.set_linewidth(1.)
self.a.set_title(title)
self.draw()
def clear_plot(self):
self.line.set_xdata([])
self.line.set_ydata([])
self.a.set_title('')
self.a.set_xlabel('')
self.draw()
def plot_dist(self,dist_index,pc):
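		# dist_index picks one of the five parameter distributions held by `pc`
		# (e1, e2, sigma, k1, k2); its PDF is drawn over a ranged support and
		# shaded with a per-parameter color.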
dnames = [r'$\varepsilon_1$', r'$\varepsilon_2$', r'$\sigma$', r'$k_1$', r'$k_2$']
colors = ['purple', 'yellow', 'green', 'cyan', 'orange']
xlabels = ['Signal', 'Signal', 'Signal Noise', r'Rate Constant (s$^{-1}$)', r'Rate Constant (s$^{-1}$)']
dist_dict = dict(zip(range(5),[pc.e1,pc.e2,pc.sigma,pc.k1,pc.k2]))
dist = dist_dict[dist_index]
if not dist.okay:
self.clear_plot()
else:
self.a.cla()
distx = dist.get_ranged_x(1001)
disty = dist.pdf(distx)
self.line = self.a.plot(distx,disty,color='k',lw=2)
# self.line.set_color(self.colors[self.selected])
self.filledin = self.a.fill_between(distx, disty, color=colors[dist_index], alpha=0.75)
if dist.name == 'beta':
self.a.set_xlim(0,1)
elif dist.name == 'gamma':
self.a.set_xlim(0,distx[-1])
else:
self.a.set_xlim(distx[0],distx[-1])
if dist.name != 'empty':
self.a.set_ylim(0.,disty[np.isfinite(disty)].max()*1.2)
self.a.set_ylabel('Probability',fontsize=18)
self.a.set_xlabel(xlabels[dist_index],fontsize=18)
self.a.set_title(dist.label_parameters[0]+": "+str(dist.parameters[0])+", "+dist.label_parameters[1]+": "+str(dist.parameters[1])+r", $E[x] = $"+str(dist.mean()))
self.draw()
| mit |
ielashi/chinese-zodiac-classifier | preprocess.py | 1 | 2924 | # Utility functions for pre-processing the raw dataset
from math import floor
from scipy.misc import imresize
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def load_dataset(filename):
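  # Each line has the form "<label> <idx>:<value> ...": a sparse, 1-based list
  # of pixel intensities densified into a 122x105 image array. Lines with fewer
  # than 20 non-zero features are skipped.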
f = open(filename, 'r')
dataset = []
for line in f:
data = line.strip().split(' ')
classification = data[0]
features = data[1:]
if len(features) >= 20:
character = np.array([0.] * 12810)
for feature in features:
(i, feature_value) = feature.split(':')
character[int(i) - 1] = float(feature_value)
character = character.reshape(122, 105)
dataset.append((character, classification))
return dataset
def load_test_dataset(filename):
f = open(filename, 'r')
dataset = []
for line in f:
data = line.strip().split(' ')
classification = data[0]
features = data[1:]
character = np.array([0.] * 12810)
for feature in features:
(i, feature_value) = feature.split(':')
character[int(i) - 1] = float(feature_value)
character = character.reshape(122, 105)
dataset.append((character, classification))
return dataset
def load_preprocessed_dataset(filename):
f = open(filename, 'r')
dataset = []
for line in f:
classification, character = line.strip().split(' ')
character = np.array([float(i) for i in character.split(',')])
dataset.append((character, int(classification)))
X = np.array([e[0] for e in dataset])
y = np.array([e[1] for e in dataset])
return X, y
def crop_bounding_box(dataset):
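  # Crop each image to the tight bounding box of its non-zero pixels; images
  # without one (e.g. all-zero images) are kept unchanged.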
new_dataset = []
for character, _ in dataset:
B = np.argwhere(character)
try:
(ystart, xstart), (ystop, xstop) = B.min(0), B.max(0) + 1
character = character[ystart:ystop, xstart:xstop]
new_dataset.append((character, _))
except:
# something is wrong with the image (empty image?)
# add image as is
new_dataset.append((character, _))
return new_dataset
def make_binary(dataset, threshold=10):
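  # Threshold grayscale images into binary {0, 1} masks.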
new_dataset = []
for character, _ in dataset:
character[character < threshold] = 0
character[character >= threshold] = 1
new_dataset.append((character, _))
return new_dataset
def resize_images(dataset, width, height):
return [(imresize(ch, (width, height)) / 256.0, cl) for ch, cl in dataset]
def output_dataset(dataset):
for character, class_ in dataset:
print class_, ','.join(['%f' % num for num in character.ravel()])
def preprocess(filename):
dataset = load_dataset(filename)
dataset = crop_bounding_box(dataset)
dataset = resize_images(dataset, 28, 28)
X = [e[0].ravel() for e in dataset]
y = [e[1] for e in dataset]
return X, y
def test_preprocess(filename):
dataset = load_test_dataset(filename)
dataset = crop_bounding_box(dataset)
dataset = resize_images(dataset, 28, 28)
X = [e[0].ravel() for e in dataset]
y = [e[1] for e in dataset]
return X, y
| gpl-2.0 |
soulmachine/scikit-learn | sklearn/datasets/tests/test_base.py | 39 | 5607 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
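# with_setup registers fixtures that nose runs immediately before and after the
# decorated test.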
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
| bsd-3-clause |
peastman/deepchem | deepchem/models/tests/test_predict.py | 4 | 1924 | """
Tests that deepchem models make deterministic predictions.
"""
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
class TestPredict(test_util.TensorFlowTestCase):
"""
Test that models make deterministic predictions
These tests guard against failures like having dropout turned on at
test time.
"""
def setUp(self):
super(TestPredict, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
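  # The progressive multitask regression check below is kept for reference but
  # disabled: the triple-quoted block is an inert string literal and never runs.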
'''
def test_tf_progressive_regression_predict(self):
"""Test tf progressive multitask makes deterministic predictions."""
np.random.seed(123)
n_tasks = 9
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[.25],
learning_rate=0.003,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Check same predictions are made.
y_pred_first = model.predict(dataset)
y_pred_second = model.predict(dataset)
np.testing.assert_allclose(y_pred_first, y_pred_second)
'''
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/dannys_example.py | 9 | 2637 | import matplotlib
matplotlib.rc('text', usetex = True)
import pylab
import numpy as np
## interface tracking profiles
N = 500
delta = 0.6
X = -1 + 2. * np.arange(N) / (N - 1)
pylab.plot(X, (1 - np.tanh(4. * X / delta)) / 2, ## phase field tanh profiles
X, (X + 1) / 2, ## level set distance function
X, (1.4 + np.tanh(4. * X / delta)) / 4, ## composition profile
X, X < 0, 'k--', ## sharp interface
linewidth = 5)
## legend
pylab.legend((r'phase field', r'level set', r'composition', r'sharp interface'), shadow = True, loc = (0.01, 0.55))
ltext = pylab.gca().get_legend().get_texts()
pylab.setp(ltext[0], fontsize = 20, color = 'b')
pylab.setp(ltext[1], fontsize = 20, color = 'g')
pylab.setp(ltext[2], fontsize = 20, color = 'r')
pylab.setp(ltext[3], fontsize = 20, color = 'k')
## the arrow
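## (a double-headed arrow of width delta, assembled from plain line segments,
## with the delta label drawn just beneath it)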
height = 0.1
offset = 0.02
pylab.plot((-delta / 2., delta / 2), (height, height), 'k', linewidth = 2)
pylab.plot((-delta / 2, -delta / 2 + offset * 2), (height, height - offset), 'k', linewidth = 2)
pylab.plot((-delta / 2, -delta / 2 + offset * 2), (height, height + offset), 'k', linewidth = 2)
pylab.plot((delta / 2, delta / 2 - offset * 2), (height, height - offset), 'k', linewidth = 2)
pylab.plot((delta / 2, delta / 2 - offset * 2), (height, height + offset), 'k', linewidth = 2)
pylab.text(-0.06, height - 0.06, r'$\delta$', {'color' : 'k', 'fontsize' : 24})
## X-axis label
pylab.xticks((-1, 0, 1), ('-1', '0', '1'), color = 'k', size = 20)
## Left Y-axis labels
pylab.ylabel(r'\bf{phase field} $\phi$', {'color' : 'b',
'fontsize' : 20 })
pylab.yticks((0, 0.5, 1), ('0', '.5', '1'), color = 'k', size = 20)
## Right Y-axis labels
pylab.text(1.05, 0.5, r"\bf{level set} $\phi$", {'color' : 'g', 'fontsize' : 20},
horizontalalignment = 'left',
verticalalignment = 'center',
rotation = 90,
clip_on = False)
pylab.text(1.01, -0.02, "-1", {'color' : 'k', 'fontsize' : 20})
pylab.text(1.01, 0.98, "1", {'color' : 'k', 'fontsize' : 20})
pylab.text(1.01, 0.48, "0", {'color' : 'k', 'fontsize' : 20})
## level set equations
pylab.text(0.1, 0.85, r'$|\nabla\phi| = 1,$ \newline $ \frac{\partial \phi}{\partial t} + U|\nabla \phi| = 0$', {'color' : 'g', 'fontsize' : 20})
## phase field equations
pylab.text(0.2, 0.15, r'$\mathcal{F} = \int f\left( \phi, c \right) dV,$ \newline $ \frac{ \partial \phi } { \partial t } = -M_{ \phi } \frac{ \delta \mathcal{F} } { \delta \phi }$',
{'color' : 'b', 'fontsize' : 20})
pylab.show()
| gpl-2.0 |
msultan/msmbuilder | msmbuilder/tests/test_ghmm.py | 3 | 6219 | from __future__ import print_function, division
import warnings
from itertools import permutations
import hmmlearn.hmm
import numpy as np
import pickle
import tempfile
from sklearn.pipeline import Pipeline
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import SuperposeFeaturizer
from msmbuilder.hmm import GaussianHMM
rs = np.random.RandomState(42)
def test_ala2():
# creates a 4-state HMM on the ALA2 data. Nothing fancy, just makes
# sure the code runs without erroring out
trajectories = AlanineDipeptide().get_cached().trajectories
topology = trajectories[0].topology
indices = topology.select('symbol C or symbol O or symbol N')
featurizer = SuperposeFeaturizer(indices, trajectories[0][0])
sequences = featurizer.transform(trajectories)
hmm = GaussianHMM(n_states=4, n_init=3, random_state=rs)
hmm.fit(sequences)
    assert len(hmm.timescales_) == 3
assert np.any(hmm.timescales_ > 50)
def create_timeseries(means, vars, transmat):
"""Construct a random timeseries based on a specified Markov model."""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
model = hmmlearn.hmm.GaussianHMM(n_components=len(means),
random_state=rs)
model.means_ = means
model.covars_ = vars
model.transmat_ = transmat
X, Y = model.sample(1000)
return X
def validate_timeseries(means, vars, transmat, model,
valuetol=1e-3, transmattol=1e-3):
"""Whether our model matches the one used to create the timeseries."""
numStates = len(means)
assert len(model.means_) == numStates
assert (model.transmat_ >= 0.0).all()
assert (model.transmat_ <= 1.0).all()
totalProbability = sum(model.transmat_.T)
assert (abs(totalProbability - 1.0) < 1e-5).all()
# The states may have come out in a different order,
# so we need to test all possible permutations.
for order in permutations(range(len(means))):
match = True
for i in range(numStates):
if abs(means[i] - model.means_[order[i]]) > valuetol:
match = False
break
if abs(vars[i] - model.vars_[order[i]]) > valuetol:
match = False
break
for j in range(numStates):
diff = transmat[i, j] - model.transmat_[order[i], order[j]]
if abs(diff) > transmattol:
match = False
break
if match:
# It matches.
return
# No permutation matched.
assert False
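# Note on the permutation search above: a model fit to data generated with
# means (0, 5) may legitimately come back with its states labeled in the
# opposite order, so (5, 0) must also be accepted; only if no ordering
# matches within the tolerances does the test fail.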
def test_2_state():
transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
means = np.array([[0.0], [5.0]])
vars = np.array([[1.0], [1.0]])
X = [create_timeseries(means, vars, transmat) for i in range(10)]
# For each value of various options,
# create a 2 state HMM and see if it is correct.
class two_state_tester(object):
def __init__(self, init_algo, reversible_type):
self.init_algo = init_algo
self.reversible_type = reversible_type
self.description = ("{}.test_3_state_{}_{}"
.format(__name__, init_algo, reversible_type))
def __call__(self, *args, **kwargs):
model = GaussianHMM(n_states=2, init_algo=self.init_algo,
reversible_type=self.reversible_type,
thresh=1e-4, n_iter=30, random_state=rs)
model.fit(X)
validate_timeseries(means, vars, transmat, model, 0.1, 0.05)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
for init_algo in ('kmeans', 'GMM'):
for reversible_type in ('mle', 'transpose'):
yield two_state_tester(init_algo, reversible_type)
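# The yields above appear to follow the nose-style generator-test
# convention: each yielded callable (with its .description attribute) is
# collected and run as a separate test case, one per
# (init_algo, reversible_type) combination.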
def test_3_state():
transmat = np.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.8, 0.2, 0.0]])
means = np.array([[0.0], [10.0], [5.0]])
vars = np.array([[1.0], [2.0], [0.3]])
X = [create_timeseries(means, vars, transmat) for i in range(20)]
# For each value of various options,
# create a 3 state HMM and see if it is correct.
class three_state_tester(object):
def __init__(self, init_algo, reversible_type):
self.init_algo = init_algo
self.reversible_type = reversible_type
self.description = ("{}.test_2_state_{}_{}"
.format(__name__, init_algo, reversible_type))
def __call__(self, *args, **kwargs):
model = GaussianHMM(n_states=3, init_algo=self.init_algo,
reversible_type=self.reversible_type,
thresh=1e-4, n_iter=30, random_state=rs)
model.fit(X)
validate_timeseries(means, vars, transmat, model, 0.1, 0.1)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
for init_algo in ('kmeans', 'GMM'):
for reversible_type in ('mle', 'transpose'):
yield three_state_tester(init_algo, reversible_type)
def test_pipeline():
trajs = AlanineDipeptide().get_cached().trajectories
topology = trajs[0].topology
indices = topology.select('backbone')
p = Pipeline([
('diheds', SuperposeFeaturizer(indices, trajs[0][0])),
('hmm', GaussianHMM(n_states=4))
])
predict = p.fit_predict(trajs)
p.named_steps['hmm'].summarize()
def test_pickle():
"""Test pickling an HMM"""
trajectories = AlanineDipeptide().get_cached().trajectories
topology = trajectories[0].topology
indices = topology.select('symbol C or symbol O or symbol N')
featurizer = SuperposeFeaturizer(indices, trajectories[0][0])
sequences = featurizer.transform(trajectories)
hmm = GaussianHMM(n_states=4, n_init=3, random_state=rs)
hmm.fit(sequences)
logprob, hidden = hmm.predict(sequences)
with tempfile.TemporaryFile() as savefile:
pickle.dump(hmm, savefile)
savefile.seek(0, 0)
hmm2 = pickle.load(savefile)
logprob2, hidden2 = hmm2.predict(sequences)
assert(logprob == logprob2) | lgpl-2.1 |
pypot/scikit-learn | sklearn/externals/joblib/parallel.py | 36 | 34375 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
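# Minimal usage sketch of BatchedCalls (illustrative only; the _sketch_*
# helper below is hypothetical and not part of the joblib API): a slice of
# (func, args, kwargs) triples becomes one picklable callable that returns
# the results in order.
def _sketch_batched_calls():
    batch = BatchedCalls([(abs, (-1,), {}), (pow, (2, 3), {})])
    assert batch() == [1, 8]
    assert len(batch) == 2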
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
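# Illustrative sketch (hypothetical helper, not part of the joblib API):
# with verbose=5 the internal scale is 0.5 * (11 - 5) ** 2 == 18, so a
# message is let through only when sqrt(index / 18) crosses an integer,
# i.e. at indices 0, 17, 71, 161, ... -- increasingly far apart.
def _sketch_verbosity_filter():
    printed = [i for i in range(200) if not _verbosity_filter(i, 5)]
    assert printed[:4] == [0, 17, 71, 161]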
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
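# Illustrative sketch (hypothetical helper, not part of the joblib API):
# an error raised inside the wrapped callable is re-raised as a
# TransportableException carrying the formatted traceback text and the
# original exception type, so it survives the trip back from a worker.
def _sketch_safe_function():
    try:
        SafeFunction(lambda: 1 / 0)()
    except TransportableException as exc:
        assert exc.etype is ZeroDivisionError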
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
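# Illustrative sketch (hypothetical helper, not part of the joblib API):
# delayed() does not call anything, it only records the call so that
# Parallel/BatchedCalls can execute it later in a worker.
def _sketch_delayed():
    from math import sqrt
    func, args, kwargs = delayed(sqrt)(16)
    assert func is sqrt and args == (16,) and kwargs == {}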
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
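        # Worked numbers for the smoothing above (illustrative): starting
        # from a reset estimate of 0 and observing batch durations of
        # 1.0 s, 0.5 s and 0.5 s, the estimate evolves
        # 0 -> 1.0 -> 0.8 * 1.0 + 0.2 * 0.5 = 0.9 -> 0.8 * 0.9 + 0.2 * 0.5 = 0.82,
        # so a single unusually slow or fast batch only nudges the estimate.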
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
        The verbosity level: if non-zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
          - using 'n_jobs=1' enables turning off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._pool = None
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
                # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
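        # Illustrative arithmetic for the 'auto' strategy above: if a 1-task
        # batch is measured at roughly 0.02 s, well under
        # MIN_IDEAL_BATCH_DURATION (0.2 s), the ideal size is about
        # 1 * 0.2 / 0.02 = 10 tasks and the next effective batch size is
        # doubled to about 20; the smoothed duration estimate is then reset
        # and re-learned at the new size.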
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print only about 'verbose' messages in total
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and mp is not None:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
self._lock = threading.Lock()
# Whether or not to set an environment flag to track
# multiple process spawning
set_environ_flag = False
if (n_jobs is None or mp is None or n_jobs == 1):
n_jobs = 1
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=2)
elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork in non-main posix threads
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Make sure to free as much memory as possible before forking
gc.collect()
# Set an environment variable to avoid infinite loops
set_environ_flag = True
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
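            # e.g. pre_dispatch='2 * n_jobs' with n_jobs=4 evaluates to 8
            # here (n_jobs is a local variable visible to the eval), so only
            # the first 8 items of the task iterator are materialized up
            # front; the rest are dispatched lazily as batches complete.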
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
if set_environ_flag:
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.terminate() # terminate does a join()
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
tskisner/pytoast | src/python/tod/sim_det_atm.py | 1 | 22542 | # Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import os
from scipy.constants import degree
from toast.ctoast import (
atm_sim_alloc, atm_sim_free, atm_sim_simulate, atm_sim_observe,
atm_get_absorption_coefficient, atm_get_atmospheric_loading)
from toast.mpi import MPI
from toast.op import Operator
import numpy as np
import toast.qarray as qa
import toast.timing as timing
class OpSimAtmosphere(Operator):
"""
Operator which generates atmosphere timestreams.
All processes collectively generate the atmospheric realization.
Then each process passes through its local data and observes the
atmosphere.
This operator is only compatible with TOD objects that can return
AZ/EL pointing.
Args:
out (str): accumulate data to the cache with name
<out>_<detector>. If the named cache objects do not exist,
then they are created.
realization (int): if simulating multiple realizations, the
realization index.
component (int): the component index to use for this noise
simulation.
lmin_center (float): Kolmogorov turbulence dissipation scale
center.
lmin_sigma (float): Kolmogorov turbulence dissipation scale
sigma.
lmax_center (float): Kolmogorov turbulence injection scale
center.
lmax_sigma (float): Kolmogorov turbulence injection scale sigma.
gain (float): Scaling applied to the simulated TOD.
zatm (float): atmosphere extent for temperature profile.
zmax (float): atmosphere extent for water vapor integration.
xstep (float): size of volume elements in X direction.
ystep (float): size of volume elements in Y direction.
zstep (float): size of volume elements in Z direction.
nelem_sim_max (int): controls the size of the simulation slices.
verbosity (int): more information is printed for values > 0.
gangsize (int): size of the gangs that create slices.
z0_center (float): central value of the water vapor
distribution.
z0_sigma (float): sigma of the water vapor distribution.
common_flag_name (str): Cache name of the output common flags.
If it already exists, it is used. Otherwise flags
are read from the tod object and stored in the cache under
common_flag_name.
common_flag_mask (byte): Bitmask to use when flagging data
based on the common flags.
flag_name (str): Cache name of the output detector flags will
be <flag_name>_<detector>. If the object exists, it is
used. Otherwise flags are read from the tod object.
flag_mask (byte): Bitmask to use when flagging data
based on the detector flags.
apply_flags (bool): When True, flagged samples are not
simulated.
report_timing (bool): Print out time taken to initialize,
simulate and observe
wind_time (float): Maximum time to simulate before
discarding the volume and creating a new one [seconds].
cachedir (str): Directory to use for loading and saving
atmosphere realizations. Set to None to disable caching.
flush (bool): Flush all print statements
freq (float): Observing frequency in GHz.
"""
def __init__(
self, out='atm', realization=0, component=123456,
lmin_center=0.01, lmin_sigma=0.001,
lmax_center=10, lmax_sigma=10, zatm=40000.0, zmax=2000.0,
xstep=100.0, ystep=100.0, zstep=100.0, nelem_sim_max=10000,
verbosity=0, gangsize=-1, gain=1, z0_center=2000, z0_sigma=0,
apply_flags=False, common_flag_name=None, common_flag_mask=255,
flag_name=None, flag_mask=255, report_timing=True,
wind_time=3600, cachedir='.', flush=False, freq=None):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._realization = realization
self._component = component
self._lmin_center = lmin_center
self._lmin_sigma = lmin_sigma
self._lmax_center = lmax_center
self._lmax_sigma = lmax_sigma
self._gain = gain
self._zatm = zatm
self._zmax = zmax
self._xstep = xstep
self._ystep = ystep
self._zstep = zstep
self._nelem_sim_max = nelem_sim_max
self._verbosity = verbosity
self._gangsize = gangsize
self._cachedir = cachedir
self._flush = flush
self._freq = freq
self._z0_center = z0_center
self._z0_sigma = z0_sigma
self._apply_flags = apply_flags
self._common_flag_name = common_flag_name
self._common_flag_mask = common_flag_mask
self._flag_name = flag_name
self._flag_mask = flag_mask
self._report_timing = report_timing
self._wind_time = wind_time
def exec(self, data):
"""
Generate atmosphere timestreams.
This iterates over all observations and detectors and generates
the atmosphere timestreams.
Args:
data (toast.Data): The distributed data.
"""
autotimer = timing.auto_timer(type(self).__name__)
group = data.comm.group
for obs in data.obs:
try:
obsname = obs['name']
except Exception:
obsname = 'observation'
prefix = '{} : {} : '.format(group, obsname)
tod = self._get_from_obs('tod', obs)
comm = tod.mpicomm
obsindx = self._get_from_obs('id', obs)
telescope = self._get_from_obs('telescope_id', obs)
site = self._get_from_obs('site_id', obs)
altitude = self._get_from_obs('altitude', obs)
weather = self._get_from_obs('weather', obs)
fp_radius = np.radians(self._get_from_obs('fpradius', obs))
# Get the observation time span and initialize the weather
# object if one is provided.
times = tod.local_times()
tmin = times[0]
tmax = times[-1]
tmin_tot = comm.allreduce(tmin, op=MPI.MIN)
tmax_tot = comm.allreduce(tmax, op=MPI.MAX)
weather.set(site, self._realization, tmin_tot)
"""
The random number generator accepts a key and a counter,
each made of two 64bit integers.
Following tod_math.py we set
key1 = realization * 2^32 + telescope * 2^16 + component
            key2 = site * 2^16 + obsindx
counter1 = currently unused (0)
counter2 = sample in stream (incremented internally in the atm code)
"""
key1 = self._realization * 2 ** 32 + telescope * 2 ** 16 \
+ self._component
key2 = site * 2 ** 16 + obsindx
counter1 = 0
counter2 = 0
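            # Worked example of the key construction (illustrative values):
            # with realization=0, telescope=1, component=123456, site=2 and
            # obsindx=7 the streams are seeded with
            #   key1 = 0 * 2**32 + 1 * 2**16 + 123456 = 188992
            #   key2 = 2 * 2**16 + 7 = 131079
            # so every (realization, telescope, component, site, observation)
            # combination yields an independent atmospheric realization.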
if self._freq is not None:
absorption = atm_get_absorption_coefficient(
altitude, weather.air_temperature, weather.surface_pressure,
weather.pwv, self._freq)
loading = atm_get_atmospheric_loading(
altitude, weather.air_temperature, weather.surface_pressure,
weather.pwv, self._freq)
tod.meta['loading'] = loading
else:
absorption = None
if self._cachedir is None:
cachedir = None
else:
# The number of atmospheric realizations can be large. Use
# sub-directories under cachedir.
subdir = str(int((obsindx % 1000) // 100))
subsubdir = str(int((obsindx % 100) // 10))
subsubsubdir = str(obsindx % 10)
cachedir = os.path.join(self._cachedir, subdir, subsubdir,
subsubsubdir)
if comm.rank == 0:
try:
os.makedirs(cachedir)
except FileExistsError:
pass
comm.Barrier()
if comm.rank == 0:
print(prefix + 'Setting up atmosphere simulation',
flush=self._flush)
comm.Barrier()
# Cache the output common flags
common_ref = tod.local_common_flags(self._common_flag_name)
# Read the extent of the AZ/EL boresight pointing, and use that
# to compute the range of angles needed for simulating the slab.
(min_az_bore, max_az_bore, min_el_bore, max_el_bore
) = tod.scan_range
# print("boresight scan range = {}, {}, {}, {}".format(
# min_az_bore, max_az_bore, min_el_bore, max_el_bore))
# Use a fixed focal plane radius so that changing the actual
# set of detectors will not affect the simulated atmosphere.
elfac = 1 / np.cos(max_el_bore + fp_radius)
azmin = min_az_bore - fp_radius * elfac
azmax = max_az_bore + fp_radius * elfac
if azmin < -2 * np.pi:
azmin += 2 * np.pi
azmax += 2 * np.pi
elif azmax > 2 * np.pi:
azmin -= 2 * np.pi
azmax -= 2 * np.pi
elmin = min_el_bore - fp_radius
elmax = max_el_bore + fp_radius
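            # e.g. a 1 degree focal plane radius at a 60 degree maximum
            # boresight elevation widens the azimuth range by roughly
            # 1 deg / cos(61 deg) ~ 2.1 deg on each side, because a fixed
            # angular radius on the sky spans a larger azimuth interval
            # close to the zenith.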
azmin = comm.allreduce(azmin, op=MPI.MIN)
azmax = comm.allreduce(azmax, op=MPI.MAX)
elmin = comm.allreduce(elmin, op=MPI.MIN)
elmax = comm.allreduce(elmax, op=MPI.MAX)
if elmin < 0 or elmax > np.pi / 2:
raise RuntimeError(
'Error in CES elevation: elmin = {:.2f}, elmax = {:.2f}'
''.format(elmin, elmax))
comm.Barrier()
# Loop over the time span in "wind_time"-sized chunks.
# wind_time is intended to reflect the correlation length
# in the atmospheric noise.
tmin = tmin_tot
istart = 0
while tmin < tmax_tot:
while times[istart] < tmin:
istart += 1
tmax = tmin + self._wind_time
if tmax < tmax_tot:
# Extend the scan to the next turnaround
istop = istart
while istop < times.size and times[istop] < tmax:
istop += 1
while istop < times.size and (common_ref[istop]
                                              & tod.TURNAROUND == 0):
istop += 1
if istop < times.size:
tmax = times[istop]
else:
tmax = tmax_tot
else:
tmax = tmax_tot
istop = times.size
ind = slice(istart, istop)
nind = istop - istart
if self._report_timing:
comm.Barrier()
tstart = MPI.Wtime()
comm.Barrier()
if comm.rank == 0:
print(prefix + 'Instantiating the atmosphere for t = {}'
''.format(tmin - tmin_tot), flush=self._flush)
comm.Barrier()
T0_center = weather.air_temperature
wx = weather.west_wind
wy = weather.south_wind
w_center = np.sqrt(wx ** 2 + wy ** 2)
wdir_center = np.arctan2(wy, wx)
sim = atm_sim_alloc(
azmin, azmax, elmin, elmax, tmin, tmax,
self._lmin_center, self._lmin_sigma,
self._lmax_center, self._lmax_sigma,
w_center, 0, wdir_center, 0,
self._z0_center, self._z0_sigma, T0_center, 0,
self._zatm, self._zmax, self._xstep,
self._ystep, self._zstep, self._nelem_sim_max,
self._verbosity, comm, self._gangsize,
key1, key2, counter1, counter2, cachedir)
if sim == 0:
raise RuntimeError(prefix + 'Failed to allocate simulation')
if self._report_timing:
comm.Barrier()
tstop = MPI.Wtime()
if comm.rank == 0 and tstop - tstart > 1:
print(prefix + 'OpSimAtmosphere: Initialized '
'atmosphere in {:.2f} s'.format(tstop - tstart),
flush=self._flush)
tstart = tstop
comm.Barrier()
use_cache = cachedir is not None
if comm.rank == 0:
fname = os.path.join(
cachedir, '{}_{}_{}_{}_metadata.txt'.format(
key1, key2, counter1, counter2))
if use_cache and os.path.isfile(fname):
print(prefix + 'Loading the atmosphere for t = {} '
'from {}'.format(tmin - tmin_tot, fname),
flush=self._flush)
cached = True
else:
print(prefix + 'Simulating the atmosphere for t = {}'
''.format(tmin - tmin_tot),
flush=self._flush)
cached = False
err = atm_sim_simulate(sim, use_cache)
if err != 0:
raise RuntimeError(prefix + 'Simulation failed.')
# Advance the sample counter in case wind_time broke the
# observation in parts
counter2 += 100000000
if self._report_timing:
comm.Barrier()
tstop = MPI.Wtime()
if comm.rank == 0 and tstop - tstart > 1:
if cached:
op = 'Loaded'
else:
op = 'Simulated'
print(prefix + 'OpSimAtmosphere: {} atmosphere in '
'{:.2f} s'.format(op, tstop - tstart),
flush=self._flush)
tstart = tstop
if self._verbosity > 0:
self._plot_snapshots(sim, prefix, obsname, azmin, azmax,
elmin, elmax, tmin, tmax, comm)
nsamp = tod.local_samples[1]
if self._report_timing:
comm.Barrier()
tstart = MPI.Wtime()
if comm.rank == 0:
print(prefix + 'Observing the atmosphere',
flush=self._flush)
for det in tod.local_dets:
# Cache the output signal
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64, (nsamp,))
# Cache the output flags
flag_ref = tod.local_flags(det, self._flag_name)
if self._apply_flags:
good = np.logical_and(
common_ref[ind] & self._common_flag_mask == 0,
flag_ref[ind] & self._flag_mask == 0)
ngood = np.sum(good)
if ngood == 0:
continue
azelquat = tod.read_pntg(
detector=det, local_start=istart, n=nind,
azel=True)[good]
atmdata = np.zeros(ngood, dtype=np.float64)
else:
ngood = nind
azelquat = tod.read_pntg(
detector=det, local_start=istart, n=nind, azel=True)
atmdata = np.zeros(nind, dtype=np.float64)
# Convert Az/El quaternion of the detector back into
# angles for the simulation.
theta, phi, _ = qa.to_angles(azelquat)
                # Azimuth is measured in the opposite direction
                # from longitude
az = 2 * np.pi - phi
el = np.pi / 2 - theta
if np.ptp(az) < np.pi:
azmin_det = np.amin(az)
azmax_det = np.amax(az)
else:
# Scanning across the zero azimuth.
azmin_det = np.amin(az[az > np.pi]) - 2 * np.pi
azmax_det = np.amax(az[az < np.pi])
elmin_det = np.amin(el)
elmax_det = np.amax(el)
if (
(not (azmin <= azmin_det and azmax_det <= azmax) and
not (azmin <= azmin_det - 2 * np.pi
and azmax_det - 2 * np.pi <= azmax))
or
not (elmin <= elmin_det and elmin_det <= elmax)
):
raise RuntimeError(
prefix + 'Detector Az/El: [{:.5f}, {:.5f}], '
'[{:.5f}, {:.5f}] is not contained in '
'[{:.5f}, {:.5f}], [{:.5f} {:.5f}]'
''.format(
azmin_det, azmax_det, elmin_det, elmax_det,
azmin, azmax, elmin, elmax))
# Integrate detector signal
err = atm_sim_observe(sim, times[ind], az, el, atmdata,
ngood, 0)
if err != 0:
# Observing failed
print(prefix + 'OpSimAtmosphere: Observing FAILED. '
'det = {}, rank = {}'.format(det, comm.rank),
flush=self._flush)
atmdata[:] = 0
flag_ref[ind] = 255
if self._gain:
atmdata *= self._gain
if absorption is not None:
# Apply the frequency-dependent absorption-coefficient
atmdata *= absorption
if self._apply_flags:
ref[ind][good] += atmdata
else:
ref[ind] += atmdata
del ref
err = atm_sim_free(sim)
if err != 0:
raise RuntimeError(prefix + 'Failed to free simulation.')
if self._report_timing:
comm.Barrier()
tstop = MPI.Wtime()
if comm.rank == 0 and tstop - tstart > 1:
print(prefix + 'OpSimAtmosphere: Observed atmosphere '
'in {:.2f} s'.format(tstop - tstart),
flush=self._flush)
tmin = tmax
return
def _plot_snapshots(self, sim, prefix, obsname, azmin, azmax,
elmin, elmax, tmin, tmax, comm):
""" Create snapshots of the atmosphere
"""
from ..vis import set_backend
set_backend()
import matplotlib.pyplot as plt
elstep = .01 * degree
azstep = elstep * np.cos(0.5 * (elmin + elmax))
azgrid = np.linspace(azmin, azmax,
(azmax - azmin) // azstep + 1)
elgrid = np.linspace(elmin, elmax,
(elmax - elmin) // elstep + 1)
AZ, EL = np.meshgrid(azgrid, elgrid)
nn = AZ.size
az = AZ.ravel()
el = EL.ravel()
atmdata = np.zeros(nn, dtype=np.float64)
atmtimes = np.zeros(nn, dtype=np.float64)
rank = comm.rank
ntask = comm.size
r = 0
t = 0
my_snapshots = []
vmin = 1e30
vmax = -1e30
tstep = 60
for i, t in enumerate(np.arange(tmin, tmax, tstep)):
if i % ntask != rank:
continue
err = atm_sim_observe(sim, atmtimes + t, az, el,
atmdata, nn, r)
if err != 0:
raise RuntimeError(prefix + 'Observation failed')
if self._gain:
atmdata *= self._gain
vmin = min(vmin, np.amin(atmdata))
vmax = max(vmax, np.amax(atmdata))
atmdata2d = atmdata.reshape(AZ.shape)
my_snapshots.append((t, r, atmdata2d.copy()))
vmin = comm.allreduce(vmin, op=MPI.MIN)
vmax = comm.allreduce(vmax, op=MPI.MAX)
for t, r, atmdata2d in my_snapshots:
plt.figure(figsize=[12, 4])
plt.imshow(
atmdata2d, interpolation='nearest',
origin='lower', extent=np.array(
[0, (azmax - azmin)
* np.cos(0.5 * (elmin + elmax)),
elmin, elmax]) / degree,
cmap=plt.get_cmap('Blues'), vmin=vmin, vmax=vmax)
plt.colorbar()
ax = plt.gca()
ax.set_title(
't = {:15.1f} s, r = {:15.1f} m'.format(t, r))
ax.set_xlabel('az [deg]')
ax.set_ylabel('el [deg]')
ax.set_yticks([elmin / degree, elmax / degree])
plt.savefig('atm_{}_t_{:04}_r_{:04}.png'.format(
obsname, int(t), int(r)))
plt.close()
del my_snapshots
return
def _get_from_obs(self, name, obs):
""" Extract value for name from observation.
If name is not defined in observation, raise an exception.
"""
if name in obs:
return obs[name]
else:
raise RuntimeError('Error simulating atmosphere: observation '
'does not define "{}"'.format(name))
| bsd-2-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/neural_network/tests/test_mlp.py | 15 | 21005 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
np.seterr(all='warn')
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
solver='lbfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
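# The loop above is a central finite difference check: each gradient
# component is approximated by
#   (loss(theta + eps * e_i) - loss(theta - eps * e_i)) / (2 * eps)
# which should agree with the backpropagated gradient to within O(eps**2).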
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problems.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
if activation == 'identity':
assert_greater(mlp.score(X, y), 0.84)
else:
# Non linear models perform much better than linear bottleneck:
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
# Non-regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(solver='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise a ValueError
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(momentum=2).fit, X, y)
assert_raises(ValueError, clf(momentum=-0.5).fit, X, y)
assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y)
assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y)
assert_raises(ValueError, clf(validation_fraction=1).fit, X, y)
assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_1=1).fit, X, y)
assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_2=1).fit, X, y)
assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y)
assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y)
assert_raises(ValueError, clf(solver='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax, which makes probabilities sum to 1
X, Y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
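# without a softmax over classes the per-sample probability rows need not
# sum to one, so the squared deviation of the row sums from 1 computed in
# the next assert should be clearly non-zero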
assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
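# the best validation score must be the maximum of the recorded scores, and
# the final two scores must not beat it by tol or more, otherwise early
# stopping would not have triggered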
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
learning_rate='adaptive')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
| mit |
krafczyk/spack | var/spack/repos/builtin/packages/py-biom-format/package.py | 2 | 2316 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBiomFormat(PythonPackage):
"""The BIOM file format (canonically pronounced biome) is designed to be
a general-use format for representing biological sample by observation
contingency tables."""
homepage = "https://pypi.python.org/pypi/biom-format/2.1.6"
url = "https://pypi.io/packages/source/b/biom-format/biom-format-2.1.6.tar.gz"
version('2.1.6', '1dd4925b74c56e8ee864d5e1973068de')
variant('h5py', default=True, description='For use with BIOM 2.0+ files')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-cython', type='build')
depends_on('py-h5py', type=('build', 'run'), when='+h5py')
depends_on('py-click', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pyqi', type=('build', 'run'))
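# Example command line (illustrative only): the HDF5 bindings can be
# disabled through the variant, e.g. `spack install py-biom-format~h5py`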
| lgpl-2.1 |
theDataGeek/pyhsmm | tests/test_hmm_geweke.py | 3 | 5991 | from __future__ import division
import numpy as np
from functools import wraps
from nose.plugins.attrib import attr
import os
import matplotlib.pyplot as plt
from pyhsmm import models, distributions
from pybasicbayes.util import testing
##########
# util #
##########
def runmultiple(n):
def dec(fn):
@wraps(fn)
def wrapper():
fig = plt.figure()
for i in range(n):
yield fn, fig
plt.close('all')
return wrapper
return dec
def mkdir(path):
# from
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
import errno
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
figure_dir_path = os.path.join(os.path.dirname(__file__),'figures')
mkdir(figure_dir_path)
###########
# tests #
###########
@attr('slow')
@runmultiple(2)
def discrete_geweke_test(fig):
Nstates = 2
Nemissions = 2
alpha = 3.
init_state_concentration=3.
T = 10
num_iter = 10000
num_checks = 10
obs_distns = [distributions.Categorical(K=Nemissions,alpha_0=1.)
for _ in range(Nstates)]
hmm = models.HMM(
alpha=alpha,init_state_concentration=init_state_concentration,
obs_distns=obs_distns)
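# Geweke-style check: (parameters, data) pairs drawn directly from the prior
# should be statistically indistinguishable from pairs produced by
# alternating Gibbs updates of parameters and data; the two blocks below
# build those two collections before comparing them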
# generate state sequences and parameters from the prior
prior_stateseqs = []
prior_weights = []
for itr in xrange(num_iter):
hmm.resample_model() # sample parameters from the prior
_, stateseq = hmm.generate(T,keep=False)
prior_stateseqs.append(stateseq)
prior_weights.append(hmm.obs_distns[0].weights)
prior_stateseqs = np.array(prior_stateseqs)
prior_weights = np.array(prior_weights)
# generate state sequences and parameters using Gibbs
hmm.generate(T,keep=True)
s = hmm.states_list[0]
gibbs_stateseqs = []
gibbs_weights = []
for itr in xrange(num_iter):
s.data = None
hmm._generate_obs(s) # resamples data given state sequence, obs params
hmm.resample_model() # resamples everything else as usual
gibbs_stateseqs.append(s.stateseq)
gibbs_weights.append(hmm.obs_distns[0].weights)
gibbs_stateseqs = np.array(gibbs_stateseqs)
gibbs_weights = np.array(gibbs_weights)
# test that they look similar by checking probability of co-assignment
time_indices = np.arange(T)
for itr in xrange(num_checks):
i,j = np.random.choice(time_indices,replace=False,size=2)
prior_prob_of_coassignment = (prior_stateseqs[:,i] == prior_stateseqs[:,j]).std()
gibbs_prob_of_coassignment = (gibbs_stateseqs[:,i] == gibbs_stateseqs[:,j]).std()
assert np.isclose(
prior_prob_of_coassignment,gibbs_prob_of_coassignment,
rtol=0.025,atol=0.025,
)
# test that they look similar by checking parameters
testing.populations_eq_quantile_plot(prior_weights,gibbs_weights,fig=fig)
figpath = os.path.join(figure_dir_path,'discrete_geweke_test_weights.pdf')
plt.savefig(figpath)
@attr('slow')
@runmultiple(2)
def discrete_geweke_multiple_seqs_test(fig):
Nstates = 2
Nemissions = 2
alpha = 3.
init_state_concentration=3.
T = 10
num_seqs = 3
num_iter = 10000
num_checks = 10
obs_distns = [distributions.Categorical(K=Nemissions,alpha_0=1.)
for _ in range(Nstates)]
hmm = models.HMM(
alpha=alpha,init_state_concentration=init_state_concentration,
obs_distns=obs_distns)
# generate state sequences and parameters from the prior
prior_stateseqss = [[] for _ in xrange(num_seqs)]
prior_weights = []
for itr in xrange(num_iter):
hmm.resample_model() # sample parameters from the prior
for itr2 in xrange(num_seqs):
_, stateseq = hmm.generate(T,keep=False)
prior_stateseqss[itr2].append(stateseq)
prior_weights.append(hmm.obs_distns[0].weights)
prior_stateseqss = np.array(prior_stateseqss)
assert prior_stateseqss.shape == (num_seqs,num_iter,T)
prior_weights = np.array(prior_weights)
# generate state sequences and parameters using Gibbs
for itr in xrange(num_seqs):
hmm.generate(T,keep=True)
assert len(hmm.states_list) == num_seqs
gibbs_stateseqss = [[] for _ in xrange(num_seqs)]
gibbs_weights = []
for itr in xrange(num_iter):
for s in hmm.states_list:
s.data = None
hmm._generate_obs(s) # resamples data given state sequence, obs params
hmm.resample_model() # resamples everything else as usual
for itr2, s in enumerate(hmm.states_list):
gibbs_stateseqss[itr2].append(s.stateseq)
gibbs_weights.append(hmm.obs_distns[0].weights)
gibbs_stateseqss = np.array(gibbs_stateseqss)
assert gibbs_stateseqss.shape == (num_seqs,num_iter,T)
gibbs_weights = np.array(gibbs_weights)
# test that they look similar by checking probability of co-assignment
time_indices = np.arange(T)
seq_indices = np.arange(num_seqs)
for itr in xrange(num_checks):
i,j = np.random.choice(time_indices,replace=False,size=2)
si,sj = np.random.choice(seq_indices,replace=True,size=2)
prior_prob_of_coassignment = \
(prior_stateseqss[si,:,i] == prior_stateseqss[sj,:,j]).std()
gibbs_prob_of_coassignment = \
(gibbs_stateseqss[si,:,i] == gibbs_stateseqss[sj,:,j]).std()
assert np.isclose(
prior_prob_of_coassignment,gibbs_prob_of_coassignment,
rtol=0.025,atol=0.025,
)
# test that they look similar by checking parameters
testing.populations_eq_quantile_plot(prior_weights,gibbs_weights,fig=fig)
figpath = os.path.join(figure_dir_path,
'discrete_geweke_multiple_seqs_test_weights.pdf')
plt.savefig(figpath)
| mit |
gclenaghan/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name of the MLComp
dataset to load
set_ : the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored. If mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
this function will choose between integer id lookup or metadata name
lookup by looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
h-mayorquin/camp_india_2016 | project2_direction/extract_voltage_traces.py | 1 | 1058 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=2.0)
file_to_read_from = './tempV.dat'
data = np.genfromtxt(fname=file_to_read_from, dtype='float')
number_of_points, number_of_plots = data.shape
# Time parameters
t_start = 0
t_end = 200.0
time = np.linspace(t_start, t_end, number_of_points)
# Plot parameters
y_lim = [-80, -60]
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(121)
for i in range(number_of_plots // 2):
ax1.plot(time, data[:,i])
ax1.hold(True)
ax1.set_ylim(y_lim)
ax1.set_ylabel('Voltage (mV)')
ax1.set_xlabel('Time (ms)')
ax1.set_title('Voltage traces for the IN sequence')
# ax.legend()
ax2 = fig.add_subplot(122, sharey=ax1)
for i in range(number_of_plots // 2, number_of_plots):
ax2.plot(time, data[:,i])
ax2.hold(True)
ax2.set_ylim(y_lim)
#ax2.set_ylabel('Volatge (mV)')
ax2.set_xlabel('Time (ms)')
ax2.set_title('Voltage traces for the OUT sequence')
plt.setp(ax2.get_yticklabels(), visible=False)
# ax.legend()
plt.show()
| mit |
aetilley/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits. Let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
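# for the bundled digits dataset this turns the (n_samples, 8, 8) image stack
# into an (n_samples, 64) feature matrix, one flattened image per row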
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the dataset
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
wmvanvliet/mne-python | mne/channels/channels.py | 1 | 73895 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Martin Luessi <[email protected]>
# Denis Engemann <[email protected]>
# Andrew Dykstra <[email protected]>
# Teon Brooks <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import sys
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import numpy as np
from ..defaults import HEAD_SIZE_DEFAULT, _handle_default
from ..transforms import _frame_to_str
from ..utils import (verbose, logger, warn,
_check_preload, _validate_type, fill_doc, _check_option,
_get_stim_channel, _check_fname)
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import (anonymize_info, Info, MontageMixin, create_info,
_rename_comps)
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _contains_ch_type,
channel_indices_by_type, pick_channels, _picks_to_idx,
_get_channel_types, get_channel_type_constants,
_pick_data_channels)
from ..io.tag import _rename_list
from ..io.write import DATE_NONE
from ..io._digitization import _get_data_as_dict_from_dig
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels."""
have_helmet = True
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
# Only take first 16 bits, as higher bits store CTF grad comp order
coil_type = ch['coil_type'] & 0xFFFF
nmag = np.sum(
[c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
# Our helmet does not match very well, so let's just create it
have_helmet = False
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
system = 'ARTEMIS123'
have_helmet = False
break
else:
system = 'unknown'
have_helmet = False
return system, have_helmet
def _get_ch_type(inst, ch_type, allow_ref_meg=False):
"""Choose a single channel type (usually for plotting).
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd',
'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude',
'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr',
'ecog', 'seeg', 'dbs']
allowed_types += ['ref_meg'] if allow_ref_meg else []
for type_ in allowed_types:
if isinstance(inst, Info):
if _contains_ch_type(inst, type_):
ch_type = type_
break
elif type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(instances, copy=True, verbose=None):
"""Equalize channel picks and ordering across multiple MNE-Python objects.
First, all channels that are not common to each object are dropped. Then,
using the first object in the list as a template, the channels of each
object are re-ordered to match the template. The end result is that all
given objects define the same channels, in the same order.
Parameters
----------
instances : list
A list of MNE-Python objects to equalize the channels for. Objects can
be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,
CrossSpectralDensity or Info.
copy : bool
When dropping and/or re-ordering channels, an object will be copied
when this parameter is set to ``True`` (the default). When set to
``False``, the dropping and re-ordering of channels happens in-place.
.. versionadded:: 0.20.0
%(verbose)s
Returns
-------
equalized_instances : list
A list of MNE-Python objects that have the same channels defined in the
same order.
Notes
-----
This function operates in-place when ``copy=False``.
"""
from ..cov import Covariance
from ..io.base import BaseRaw
from ..io.meas_info import Info
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..forward import Forward
from ..time_frequency import _BaseTFR, CrossSpectralDensity
# Instances need to have a `ch_names` attribute and a `pick_channels`
# method that supports `ordered=True`.
allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward,
Covariance, CrossSpectralDensity, Info)
allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, "
"CrossSpectralDensity or Info")
for inst in instances:
_validate_type(inst, allowed_types, "Instances to be modified",
allowed_types_str)
chan_template = instances[0].ch_names
logger.info('Identifying common channels ...')
channels = [set(inst.ch_names) for inst in instances]
common_channels = set(chan_template).intersection(*channels)
all_channels = set(chan_template).union(*channels)
dropped = list(set(all_channels - common_channels))
# Preserve the order of chan_template
order = np.argsort([chan_template.index(ch) for ch in common_channels])
common_channels = np.array(list(common_channels))[order].tolist()
# Update all instances to match the common_channels list
reordered = False
equalized_instances = []
for inst in instances:
# Only perform picking when needed
if inst.ch_names != common_channels:
if copy:
inst = inst.copy()
inst.pick_channels(common_channels, ordered=True)
if len(inst.ch_names) == len(common_channels):
reordered = True
equalized_instances.append(inst)
if dropped:
logger.info('Dropped the following channels:\n%s' % dropped)
elif reordered:
logger.info('Channels have been re-ordered.')
return equalized_instances
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def __contains__(self, ch_type):
"""Check channel type membership.
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
@property
def compensation_grade(self):
"""The current gradient compensation grade."""
return get_current_comp(self.info)
@fill_doc
def get_channel_types(self, picks=None, unique=False, only_data_chs=False):
"""Get a list of channel type for each channel.
Parameters
----------
%(picks_all)s
unique : bool
Whether to return only unique channel types. Default is ``False``.
only_data_chs : bool
Whether to ignore non-data channels. Default is ``False``.
Returns
-------
channel_types : list
The channel types.
"""
return _get_channel_types(self.info, picks=picks, unique=unique,
only_data_chs=only_data_chs)
@fill_doc
def get_montage(self):
"""Get a DigMontage from instance.
Returns
-------
%(montage)s
"""
from ..channels.montage import make_dig_montage
if self.info['dig'] is None:
return None
# obtain coord_frame, and landmark coords
# (nasion, lpa, rpa, hsp, hpi) from DigPoints
montage_bunch = _get_data_as_dict_from_dig(self.info['dig'])
coord_frame = _frame_to_str.get(montage_bunch.coord_frame)
# get the channel names and chs data structure
ch_names, chs = self.info['ch_names'], self.info['chs']
picks = pick_types(self.info, meg=False, eeg=True,
seeg=True, ecog=True, dbs=True)
# channel positions from dig do not match ch_names one to one,
# so use loc[:3] instead
ch_pos = {ch_names[ii]: chs[ii]['loc'][:3] for ii in picks}
# create montage
montage = make_dig_montage(
ch_pos=ch_pos,
coord_frame=coord_frame,
nasion=montage_bunch.nasion,
lpa=montage_bunch.lpa,
rpa=montage_bunch.rpa,
hsp=montage_bunch.hsp,
hpi=montage_bunch.hpi,
)
return montage
channel_type_constants = get_channel_type_constants()
_human2fiff = {k: v.get('kind', FIFF.FIFFV_COIL_NONE) for k, v in
channel_type_constants.items()}
_human2unit = {k: v.get('unit', FIFF.FIFF_UNIT_NONE) for k, v in
channel_type_constants.items()}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_MOL: 'M',
FIFF.FIFF_UNIT_NONE: 'NA',
FIFF.FIFF_UNIT_CEL: 'C'}
def _check_set(ch, projs, ch_type):
"""Ensure type change is compatible with projectors."""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(MontageMixin):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def set_eeg_reference(self, ref_channels='average', projection=False,
ch_type='auto', forward=None, verbose=None):
"""Specify which reference to use for EEG data.
Use this function to explicitly specify the desired reference for EEG.
This can be either an existing electrode or a new virtual channel.
This function will re-reference the data according to the desired
reference.
Parameters
----------
%(set_eeg_reference_ref_channels)s
%(set_eeg_reference_projection)s
%(set_eeg_reference_ch_type)s
%(set_eeg_reference_forward)s
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'``
and ``projection=True`` a projection will be added instead of
directly re-referencing the data.
%(set_eeg_reference_see_also_notes)s
"""
from ..io.reference import set_eeg_reference
return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
projection=projection, ch_type=ch_type,
forward=forward)[0]
def _get_channel_positions(self, picks=None):
"""Get channel locations from info.
Parameters
----------
picks : str | list | slice | None
None gets good data indices.
Notes
-----
.. versionadded:: 0.9.0
"""
picks = _picks_to_idx(self.info, picks)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info.
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
pos = np.asarray(pos, dtype=np.float64)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
@verbose
def set_channel_types(self, mapping, verbose=None):
"""Define the sensor type of channels.
Parameters
----------
mapping : dict
A dictionary mapping a channel to a sensor type (str), e.g.,
``{'EEG061': 'eog'}``.
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
The following sensor types are accepted:
ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst,
ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude,
fnirs_fd_phase, fnirs_od
.. versionadded:: 0.9.0
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
unit_changes = dict()
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
this_change = (_unit2human[unit_old], _unit2human[unit_new])
if this_change not in unit_changes:
unit_changes[this_change] = list()
unit_changes[this_change].append(ch_name)
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog', 'dbs']:
coil_type = FIFF.FIFFV_COIL_EEG
elif ch_type == 'hbo':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
elif ch_type == 'hbr':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
elif ch_type == 'fnirs_cw_amplitude':
coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE
elif ch_type == 'fnirs_fd_ac_amplitude':
coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE
elif ch_type == 'fnirs_fd_phase':
coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE
elif ch_type == 'fnirs_od':
coil_type = FIFF.FIFFV_COIL_FNIRS_OD
else:
coil_type = FIFF.FIFFV_COIL_NONE
self.info['chs'][c_ind]['coil_type'] = coil_type
msg = "The unit for channel(s) {0} has changed from {1} to {2}."
for this_change, names in unit_changes.items():
warn(msg.format(", ".join(sorted(names)), *this_change))
return self
@verbose
def rename_channels(self, mapping, allow_duplicates=False, verbose=None):
"""Rename channels.
Parameters
----------
%(rename_channels_mapping_duplicates)s
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from ..io import BaseRaw
ch_names_orig = list(self.info['ch_names'])
rename_channels(self.info, mapping, allow_duplicates)
# Update self._orig_units for Raw
if isinstance(self, BaseRaw):
# whatever mapping was provided, now we can just use a dict
mapping = dict(zip(ch_names_orig, self.info['ch_names']))
if self._orig_units is not None:
for old_name, new_name in mapping.items():
if old_name != new_name:
self._orig_units[new_name] = self._orig_units[old_name]
del self._orig_units[old_name]
ch_names = self.annotations.ch_names
for ci, ch in enumerate(ch_names):
ch_names[ci] = tuple(mapping.get(name, name) for name in ch)
return self
@verbose
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True,
axes=None, block=False, show=True, sphere=None,
verbose=None):
"""Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d',
'select'. If 'select', a set of channels can be selected
interactively by using lasso selector or clicking while holding
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag,
grad, eeg, seeg, dbs, and ecog channels are plotted. If
None (default), then channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
positions (%%s)' %% ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
sensor array appears similar as to looking downwards straight above
the subject's head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an
instance of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
%(topomap_sphere_auto)s
%(verbose_meth)s
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, ch_groups=ch_groups,
to_sphere=to_sphere, axes=axes, block=block,
show=show, sphere=sphere, verbose=verbose)
@verbose
def anonymize(self, daysback=None, keep_his=False, verbose=None):
"""Anonymize measurement information in place.
Parameters
----------
%(anonymize_info_parameters)s
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified instance.
Notes
-----
%(anonymize_info_notes)s
.. versionadded:: 0.13.0
"""
anonymize_info(self.info, daysback=daysback, keep_his=keep_his,
verbose=verbose)
self.set_meas_date(self.info['meas_date']) # unify annot update
return self
def set_meas_date(self, meas_date):
"""Set the measurement start date.
Parameters
----------
meas_date : datetime | float | tuple | None
The new measurement date.
If datetime object, it must be timezone-aware and in UTC.
A tuple of (seconds, microseconds) or float (alias for
``(meas_date, 0)``) can also be passed and a datetime
object will be automatically created. If None, will remove
the time reference.
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified raw instance. Operates in place.
See Also
--------
mne.io.Raw.anonymize
Notes
-----
If you want to remove all time references in the file, call
:func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>`
after calling ``inst.set_meas_date(None)``.
.. versionadded:: 0.20
"""
from ..annotations import _handle_meas_date
meas_date = _handle_meas_date(meas_date)
self.info['meas_date'] = meas_date
# clear file_id and meas_id if needed
if meas_date is None:
for key in ('file_id', 'meas_id'):
value = self.info.get(key)
if value is not None:
assert 'msecs' not in value
value['secs'] = DATE_NONE[0]
value['usecs'] = DATE_NONE[1]
# The following copy is needed for a test CTF dataset
# otherwise value['machid'][:] = 0 would suffice
_tmp = value['machid'].copy()
_tmp[:] = 0
value['machid'] = _tmp
if hasattr(self, 'annotations'):
self.annotations._orig_time = meas_date
return self
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR."""
@verbose
def pick_types(self, meg=False, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, dipole=False, gof=False, bio=False,
ecog=False, fnirs=False, csd=False, dbs=False, include=(),
exclude='bads', selection=None, verbose=None):
"""Pick some channels by type and names.
Parameters
----------
meg : bool | str
If True include MEG channels. If string it can be 'mag', 'grad',
'planar1' or 'planar2' to select only magnetometers, all
gradiometers, or a specific type of gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', reference
channels are included if compensations are present and ``meg`` is
not False. Can also be the string options for the ``meg``
parameter.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If ``True`` include respiratory channels.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can
be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
csd : bool
EEG-CSD channels.
dbs : bool
Deep brain stimulation channels.
include : list of str
List of additional channels to include. If empty do not include
any.
exclude : list of str | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of str
Restrict sensor channels (MEG, EEG) to this list of channel names.
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
ecog=ecog, fnirs=fnirs, dbs=dbs, include=include, exclude=exclude,
selection=selection)
self._pick_drop_channels(idx)
# remove dropped channel types from reject and flat
if getattr(self, 'reject', None) is not None:
# use list(self.reject) to avoid RuntimeError for changing
# dictionary size during iteration
for ch_type in list(self.reject):
if ch_type not in self:
del self.reject[ch_type]
if getattr(self, 'flat', None) is not None:
for ch_type in list(self.flat):
if ch_type not in self:
del self.flat[ch_type]
return self
def pick_channels(self, ch_names, ordered=False):
"""Pick some channels.
Parameters
----------
ch_names : list
The list of channels to select.
ordered : bool
If True (default False), ensure that the order of the channels in
the modified instance matches the order of ``ch_names``.
.. versionadded:: 0.20.0
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
reorder_channels
Notes
-----
The channel names given are assumed to be a set, i.e. the order
does not matter. The original order of the channels is preserved.
You can use ``reorder_channels`` to set channel order if necessary.
.. versionadded:: 0.9.0
"""
picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered)
return self._pick_drop_channels(picks)
@fill_doc
def pick(self, picks, exclude=()):
"""Pick a subset of channels.
Parameters
----------
%(picks_all)s
exclude : list | str
Set of channels to exclude, only used when picking based on
types (e.g., exclude="bads" when picks="meg").
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
picks = _picks_to_idx(self.info, picks, 'all', exclude,
allow_empty=False)
return self._pick_drop_channels(picks)
def reorder_channels(self, ch_names):
"""Reorder channels.
Parameters
----------
ch_names : list
The desired channel order.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
pick_channels
Notes
-----
Channel names must be unique. Channels that are not in ``ch_names``
are dropped.
.. versionadded:: 0.16.0
"""
_check_excludes_includes(ch_names)
idx = list()
for ch_name in ch_names:
ii = self.ch_names.index(ch_name)
if ii in idx:
raise ValueError('Channel name repeated: %s' % (ch_name,))
idx.append(ii)
return self._pick_drop_channels(idx)
def drop_channels(self, ch_names):
"""Drop channel(s).
Parameters
----------
ch_names : iterable or str
Iterable (e.g. list) of channel name(s) or channel name to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
reorder_channels
pick_channels
pick_types
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(ch_names, str):
ch_names = [ch_names]
try:
all_str = all([isinstance(ch, str) for ch in ch_names])
except TypeError:
raise ValueError("'ch_names' must be iterable, got "
"type {} ({}).".format(type(ch_names), ch_names))
if not all_str:
raise ValueError("Each element in 'ch_names' must be str, got "
"{}.".format([type(ch) for ch in ch_names]))
missing = [ch for ch in ch_names if ch not in self.ch_names]
if len(missing) > 0:
msg = "Channel(s) {0} not found, nothing dropped."
raise ValueError(msg.format(", ".join(missing)))
bad_idx = [self.ch_names.index(ch) for ch in ch_names
if ch in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
return self._pick_drop_channels(idx)
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..io import BaseRaw
from ..time_frequency import AverageTFR, EpochsTFR
msg = 'adding, dropping, or reordering channels'
if isinstance(self, BaseRaw):
if self._projector is not None:
_check_preload(self, f'{msg} after calling .apply_proj()')
else:
_check_preload(self, msg)
if getattr(self, 'picks', None) is not None:
self.picks = self.picks[idx]
if getattr(self, '_read_picks', None) is not None:
self._read_picks = [r[idx] for r in self._read_picks]
if hasattr(self, '_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
for key in ('_comp', '_projector'):
mat = getattr(self, key, None)
if mat is not None:
setattr(self, key, mat[idx][:, idx])
# All others (Evoked, Epochs, Raw) have chs axis=-2
axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
if hasattr(self, '_data'): # skip non-preloaded Raw
self._data = self._data.take(idx, axis=axis)
else:
assert isinstance(self, BaseRaw) and not self.preload
if isinstance(self, BaseRaw):
self.annotations._prune_ch_names(self.info, on_missing='ignore')
self._pick_projs()
return self
def _pick_projs(self):
"""Keep only projectors which apply to at least 1 data channel."""
drop_idx = []
for idx, proj in enumerate(self.info['projs']):
if not set(self.info['ch_names']) & set(proj['data']['col_names']):
drop_idx.append(idx)
for idx in drop_idx:
logger.info(f"Removing projector {self.info['projs'][idx]}")
if drop_idx and hasattr(self, 'del_proj'):
self.del_proj(drop_idx)
return self
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
A list of objects to append to self. Must contain all the same
type as the current object.
force_update_info : bool
If True, force the info for objects to be appended to match the
values in ``self``. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
If ``self`` is a Raw instance that has been preloaded into a
:obj:`numpy.memmap` instance, the memmap will be resized.
"""
# avoid circular imports
from ..io import BaseRaw, _merge_info
from ..epochs import BaseEpochs
_validate_type(add_list, (list, tuple), 'Input')
# Object-specific checks
for inst in add_list + [self]:
_check_preload(inst, "adding channels")
if isinstance(self, BaseRaw):
con_axis = 0
comp_class = BaseRaw
elif isinstance(self, BaseEpochs):
con_axis = 1
comp_class = BaseEpochs
else:
con_axis = 0
comp_class = type(self)
for inst in add_list:
_validate_type(inst, comp_class, 'All input')
data = [inst._data for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
for shape in shapes:
if not ((shapes[0] - shape) == 0).all():
raise AssertionError('All data dimensions except channels '
'must match, got %s != %s'
% (shapes[0], shape))
del shapes
# Create final data / info objects
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
if isinstance(self._data, np.memmap) and con_axis == 0 and \
sys.platform != 'darwin': # resizing not available--no mremap
# Use a resize and fill in other ones
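# grow the memory-mapped file on disk to the combined channel count,
# re-map it with the new shape, then copy the extra channels in place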
out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]
n_bytes = np.prod(out_shape) * self._data.dtype.itemsize
self._data.flush()
self._data.base.resize(n_bytes)
self._data = np.memmap(self._data.filename, mode='r+',
dtype=self._data.dtype, shape=out_shape)
assert self._data.shape == out_shape
assert self._data.nbytes == n_bytes
offset = len(data[0])
for d in data[1:]:
this_len = len(d)
self._data[offset:offset + this_len] = d
offset += this_len
else:
self._data = np.concatenate(data, axis=con_axis)
self.info = new_info
if isinstance(self, BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
# We should never use these since data are preloaded, let's just
# set it to something large and likely to break (2 ** 31 - 1)
extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:])
assert all(len(r) == infos[0]['nchan'] for r in self._read_picks)
self._read_picks = [
np.concatenate([r, extra_idx]) for r in self._read_picks]
assert all(len(r) == self.info['nchan'] for r in self._read_picks)
return self
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def interpolate_bads(self, reset_bads=True, mode='accurate',
origin='auto', method=None, verbose=None):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used for interpolation of channels
using the minimum-norm method.
origin : array-like, shape (3,) | str
Origin of the sphere in the head coordinate frame and in meters.
Can be ``'auto'`` (default), which means a head-digitization-based
origin fit.
.. versionadded:: 0.17
method : dict
Method to use for each channel type.
Currently only the key "eeg" has multiple options:
- ``"spline"`` (default)
Use spherical spline interpolation.
- ``"MNE"``
Use minimum-norm projection to a sphere and back.
This is the method used for MEG channels.
The value for "meg" is "MNE", and the value for
"fnirs" is "nearest". The default (None) is thus an alias for::
method=dict(meg="MNE", eeg="spline", fnirs="nearest")
.. versionadded:: 0.21
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from ..bem import _check_origin
from .interpolation import _interpolate_bads_eeg,\
_interpolate_bads_meeg, _interpolate_bads_nirs
_check_preload(self, "interpolation")
method = _handle_default('interpolation_method', method)
for key in method:
_check_option('method[key]', key, ('meg', 'eeg', 'fnirs'))
_check_option("method['eeg']", method['eeg'], ('spline', 'MNE'))
_check_option("method['meg']", method['meg'], ('MNE',))
_check_option("method['fnirs']", method['fnirs'], ('nearest',))
if len(self.info['bads']) == 0:
warn('No bad channels to interpolate. Doing nothing...')
return self
logger.info('Interpolating bad channels')
origin = _check_origin(origin, self.info)
if method['eeg'] == 'spline':
_interpolate_bads_eeg(self, origin=origin)
eeg_mne = False
else:
eeg_mne = True
_interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne)
_interpolate_bads_nirs(self)
if reset_bads is True:
self.info['bads'] = []
return self
@verbose
def rename_channels(info, mapping, allow_duplicates=False, verbose=None):
"""Rename channels.
Parameters
----------
info : dict
Measurement info to modify.
%(rename_channels_mapping_duplicates)s
%(verbose)s
"""
_validate_type(info, Info, 'info')
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
for new_name in new_names:
_validate_type(new_name[1], 'str', 'New channel mappings')
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)) and not allow_duplicates:
raise ValueError('New channel names are not unique, renaming failed')
# do the remapping in info
info['bads'] = bads
ch_names_mapping = dict()
for ch, ch_name in zip(info['chs'], ch_names):
ch_names_mapping[ch['ch_name']] = ch_name
ch['ch_name'] = ch_name
# .get b/c fwd info omits it
_rename_comps(info.get('comps', []), ch_names_mapping)
if 'projs' in info: # fwd might omit it
for proj in info['projs']:
proj['data']['col_names'][:] = \
_rename_list(proj['data']['col_names'], ch_names_mapping)
info._update_redundant()
info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
@fill_doc
def read_ch_adjacency(fname, picks=None):
"""Parse FieldTrip neighbors .mat file.
More information on these neighbor definitions can be found on the related
`FieldTrip documentation pages
<http://www.fieldtriptoolbox.org/template/neighbours/>`__.
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
%(picks_all)s
Picks must match the template.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
find_ch_adjacency
Notes
-----
This function is closely related to :func:`find_ch_adjacency`. If you
don't know the correct file for the neighbor definitions,
:func:`find_ch_adjacency` can compute the adjacency matrix from 2d
sensor locations.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], str)
picks = _picks_to_idx(len(ch_names), picks)
neighbors = [_recursive_flatten(c, str) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
adjacency = _ch_neighbor_adjacency(ch_names, neighbors)
# picking before constructing matrix is buggy
adjacency = adjacency[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return adjacency, ch_names
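# Illustrative usage sketch (not part of the library code): load a bundled
# FieldTrip template by name and optionally restrict it to a subset of
# channels. The template name comes from the docstring examples above; the
# pick values are arbitrary illustrations.
#
#     adjacency, ch_names = read_ch_adjacency('neuromag306mag')
#     sub_adj, sub_names = read_ch_adjacency('neuromag306mag', picks=[0, 1, 2])
#     # sub_adj.shape == (3, 3) and len(sub_names) == 3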
def _ch_neighbor_adjacency(ch_names, neighbors):
"""Compute sensor adjacency matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_adjacency : scipy.sparse matrix
The adjacency matrix.
"""
from scipy import sparse
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = {c for d in neighbors for c in d}
rest = set_neighbors - set(ch_names)
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, str) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_adjacency = np.eye(len(ch_names), dtype=bool)
for ii, neigbs in enumerate(neighbors):
ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True
ch_adjacency = sparse.csr_matrix(ch_adjacency)
return ch_adjacency
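# Minimal worked example (shown as comments only): three channels in a row,
# where each channel neighbours the adjacent one(s).
#
#     adj = _ch_neighbor_adjacency(['A', 'B', 'C'],
#                                  [['B'], ['A', 'C'], ['B']])
#     adj.toarray()
#     # array([[ True,  True, False],
#     #        [ True,  True,  True],
#     #        [False,  True,  True]])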
def find_ch_adjacency(info, ch_type):
"""Find the adjacency matrix for the given channels.
This function tries to infer the appropriate adjacency matrix template
for the given channels. If a template is not found, the adjacency matrix
is computed using Delaunay triangulation based on 2d sensor locations.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : str | None
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
only one channel type.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
read_ch_adjacency
Notes
-----
.. versionadded:: 0.15
Automatic detection of an appropriate adjacency matrix template only
works for MEG data at the moment. This means that the adjacency matrix
is always computed for EEG data and never loaded from a template file. If
you want to load a template for a given montage use
:func:`read_ch_adjacency` directly.
"""
if ch_type is None:
picks = channel_indices_by_type(info)
if sum([len(p) != 0 for p in picks.values()]) != 1:
raise ValueError('info must contain only one channel type if '
'ch_type is None.')
ch_type = channel_type(info, 0)
else:
_check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])
(has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only,
has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info)
conn_name = None
if has_vv_mag and ch_type == 'mag':
conn_name = 'neuromag306mag'
elif has_vv_grad and ch_type == 'grad':
conn_name = 'neuromag306planar'
elif has_4D_mag:
if 'MEG 248' in info['ch_names']:
idx = info['ch_names'].index('MEG 248')
grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
if ch_type == 'grad' and grad:
conn_name = 'bti248grad'
elif ch_type == 'mag' and mag:
conn_name = 'bti248'
elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
idx = info['ch_names'].index('MEG 148')
if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
conn_name = 'bti148'
elif has_CTF_grad and ch_type == 'mag':
if info['nchan'] < 100:
conn_name = 'ctf64'
elif info['nchan'] > 200:
conn_name = 'ctf275'
else:
conn_name = 'ctf151'
elif n_kit_grads > 0:
from ..io.kit.constants import KIT_NEIGHBORS
conn_name = KIT_NEIGHBORS.get(info['kit_system_id'])
if conn_name is not None:
logger.info('Reading adjacency matrix for %s.' % conn_name)
return read_ch_adjacency(conn_name)
    logger.info('Could not find an adjacency matrix for the data. '
'Computing adjacency based on Delaunay triangulations.')
return _compute_ch_adjacency(info, ch_type)
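# Illustrative usage sketch (not part of the library code): ``meg_info`` and
# ``eeg_info`` are hypothetical Info objects. For a Neuromag/Vectorview
# recording a bundled template is read; for EEG the matrix is always built
# from a Delaunay triangulation of the 2d sensor positions (see Notes above).
#
#     adjacency, ch_names = find_ch_adjacency(meg_info, ch_type='mag')
#     eeg_adjacency, eeg_names = find_ch_adjacency(eeg_info, ch_type='eeg')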
def _compute_ch_adjacency(info, ch_type):
"""Compute channel adjacency matrix using Delaunay triangulations.
Parameters
----------
    info : instance of Info
The measurement info.
ch_type : str
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad' and 'eeg'.
Returns
-------
ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
"""
from scipy import sparse
from scipy.spatial import Delaunay
from .. import spatial_tris_adjacency
from ..channels.layout import _find_topomap_coords, _pair_grad_sensors
combine_grads = (ch_type == 'grad'
and any([coil_type in [ch['coil_type']
for ch in info['chs']]
for coil_type in
[FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_NM_122]]))
picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
ch_names = [info['ch_names'][pick] for pick in picks]
if combine_grads:
pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
if len(pairs) != len(picks):
raise RuntimeError('Cannot find a pair for some of the '
'gradiometers. Cannot compute adjacency '
'matrix.')
# only for one of the pair
xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT)
else:
xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT)
tri = Delaunay(xy)
neighbors = spatial_tris_adjacency(tri.simplices)
if combine_grads:
ch_adjacency = np.eye(len(picks), dtype=bool)
for idx, neigbs in zip(neighbors.row, neighbors.col):
for ii in range(2): # make sure each pair is included
for jj in range(2):
ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True
ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair
ch_adjacency = sparse.csr_matrix(ch_adjacency)
else:
ch_adjacency = sparse.lil_matrix(neighbors)
ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0]))
ch_adjacency = ch_adjacency.tocsr()
return ch_adjacency, ch_names
def fix_mag_coil_types(info, use_cal=False):
"""Fix magnetometer coil types.
Parameters
----------
info : dict
The info dict to correct. Corrections are done in-place.
use_cal : bool
If True, further refine the check for old coil types by checking
``info['chs'][ii]['cal']``.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
    the data files still define the magnetometers as type
    3022 to ensure compatibility with older versions of the Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of ``fix_mag_coil_types`` is not mandatory.
"""
old_mag_inds = _get_T1T2_mag_inds(info, use_cal)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
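# Illustrative usage sketch (not part of the library code): ``raw_info`` is a
# hypothetical Info from an older Vectorview recording. The call rewrites
# T1/T2 magnetometer coil types to T3 in place.
#
#     fix_mag_coil_types(raw_info)
#     fix_mag_coil_types(raw_info, use_cal=True)  # also check ch['cal'] values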
def _get_T1T2_mag_inds(info, use_cal=False):
"""Find T1/T2 magnetometer coil types."""
picks = pick_types(info, meg='mag')
old_mag_inds = []
# From email exchanges, systems with the larger T2 coil only use the cal
# value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10
# (Triux). So we can use a simple check for > 3e-11.
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
if use_cal:
if ch['cal'] > 3e-11:
old_mag_inds.append(ii)
else:
old_mag_inds.append(ii)
return old_mag_inds
def _get_ch_info(info):
"""Get channel info for inferring acquisition device."""
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}
channel_types = {ch['kind'] for ch in chs}
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_neuromag_122_grad = any(k in coil_types
for k in [FIFF.FIFFV_COIL_NM_122])
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad,
has_csd_coils)
def make_1020_channel_selections(info, midline="z"):
"""Return dict mapping from ROI names to lists of picks for 10/20 setups.
This passes through all channel names, and uses a simple heuristic to
separate channel names into three Region of Interest-based selections:
    Left, Midline and Right. The heuristic is that channels ending in any of
the characters in ``midline`` are filed under that heading, otherwise those
ending in odd numbers under "Left", those in even numbers under "Right".
Other channels are ignored. This is appropriate for 10/20 files, but not
for other channel naming conventions.
If an info object is provided, lists are sorted from posterior to anterior.
Parameters
----------
info : instance of Info
Where to obtain the channel names from. The picks will
be in relation to the position in ``info["ch_names"]``. If possible,
        this list will be sorted by the y position of the channel locations,
i.e., from back to front.
midline : str
Names ending in any of these characters are stored under the
``Midline`` key. Defaults to 'z'. Note that capitalization is ignored.
Returns
-------
selections : dict
A dictionary mapping from ROI names to lists of picks (integers).
"""
_validate_type(info, "info")
try:
from .layout import find_layout
layout = find_layout(info)
pos = layout.pos
ch_names = layout.names
except RuntimeError: # no channel positions found
ch_names = info["ch_names"]
pos = None
selections = dict(Left=[], Midline=[], Right=[])
for pick, channel in enumerate(ch_names):
last_char = channel[-1].lower() # in 10/20, last char codes hemisphere
if last_char in midline:
selection = "Midline"
elif last_char.isdigit():
selection = "Left" if int(last_char) % 2 else "Right"
else: # ignore the channel
continue
selections[selection].append(pick)
if pos is not None:
        # sort channels from posterior (back) to anterior (front)
        # (ascending y-coordinate of the position info in the layout)
selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
for selection, picks in selections.items()}
return selections
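# Illustrative usage sketch (not part of the library code): ``eeg_info`` is a
# hypothetical Info with 10/20-style names such as 'Fp1', 'Cz' and 'O2'.
#
#     selections = make_1020_channel_selections(eeg_info, midline='z')
#     selections['Midline']  # picks for channels ending in 'z' (e.g. Cz, Pz)
#     selections['Left']     # picks for channels ending in an odd digit
#     selections['Right']    # picks for channels ending in an even digit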
def combine_channels(inst, groups, method='mean', keep_stim=False,
drop_bad=False):
"""Combine channels based on specified channel grouping.
Parameters
----------
inst : instance of Raw, Epochs, or Evoked
An MNE-Python object to combine the channels for. The object can be of
type Raw, Epochs, or Evoked.
groups : dict
Specifies which channels are aggregated into a single channel, with
aggregation method determined by the ``method`` parameter. One new
pseudo-channel is made per dict entry; the dict values must be lists of
picks (integer indices of ``ch_names``). For example::
groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])
Note that within a dict entry all channels must have the same type.
method : str | callable
Which method to use to combine channels. If a :class:`str`, must be one
of 'mean', 'median', or 'std' (standard deviation). If callable, the
callable must accept one positional input (data of shape ``(n_channels,
n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs,
n_times)``. For example with an instance of Raw or Evoked::
method = lambda data: np.mean(data, axis=0)
Another example with an instance of Epochs::
method = lambda data: np.median(data, axis=1)
Defaults to ``'mean'``.
keep_stim : bool
If ``True``, include stimulus channels in the resulting object.
Defaults to ``False``.
drop_bad : bool
If ``True``, drop channels marked as bad before combining. Defaults to
``False``.
Returns
-------
combined_inst : instance of Raw, Epochs, or Evoked
An MNE-Python object of the same type as the input ``inst``, containing
one virtual channel for each group in ``groups`` (and, if ``keep_stim``
is ``True``, also containing stimulus channels).
"""
from ..io import BaseRaw, RawArray
from .. import BaseEpochs, EpochsArray, Evoked, EvokedArray
ch_axis = 1 if isinstance(inst, BaseEpochs) else 0
ch_idx = list(range(inst.info['nchan']))
ch_names = inst.info['ch_names']
ch_types = inst.get_channel_types()
inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data()
groups = OrderedDict(deepcopy(groups))
# Convert string values of ``method`` into callables
# XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py
if isinstance(method, str):
method_dict = {key: partial(getattr(np, key), axis=ch_axis)
for key in ('mean', 'median', 'std')}
try:
method = method_dict[method]
except KeyError:
raise ValueError('"method" must be a callable, or one of "mean", '
f'"median", or "std"; got "{method}".')
# Instantiate channel info and data
new_ch_names, new_ch_types, new_data = [], [], []
if not isinstance(keep_stim, bool):
raise TypeError('"keep_stim" must be of type bool, not '
f'{type(keep_stim)}.')
if keep_stim:
stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True))
if stim_ch_idx:
new_ch_names = [ch_names[idx] for idx in stim_ch_idx]
new_ch_types = [ch_types[idx] for idx in stim_ch_idx]
new_data = [np.take(inst_data, idx, axis=ch_axis)
for idx in stim_ch_idx]
else:
warn('Could not find stimulus channels.')
# Get indices of bad channels
ch_idx_bad = []
if not isinstance(drop_bad, bool):
raise TypeError('"drop_bad" must be of type bool, not '
f'{type(drop_bad)}.')
if drop_bad and inst.info['bads']:
ch_idx_bad = pick_channels(ch_names, inst.info['bads'])
# Check correctness of combinations
for this_group, this_picks in groups.items():
# Check if channel indices are out of bounds
if not all(idx in ch_idx for idx in this_picks):
raise ValueError('Some channel indices are out of bounds.')
# Check if heterogeneous sensor type combinations
this_ch_type = np.array(ch_types)[this_picks]
if len(set(this_ch_type)) > 1:
types = ', '.join(set(this_ch_type))
raise ValueError('Cannot combine sensors of different types; '
f'"{this_group}" contains types {types}.')
# Remove bad channels
these_bads = [idx for idx in this_picks if idx in ch_idx_bad]
this_picks = [idx for idx in this_picks if idx not in ch_idx_bad]
if these_bads:
logger.info('Dropped the following channels in group '
f'{this_group}: {these_bads}')
        # Check if combining fewer than 2 channels
        if len(set(this_picks)) < 2:
            warn(f'Fewer than 2 channels in group "{this_group}" when '
f'combining by method "{method}".')
        # If all checks pass, create a more detailed dict without bad channels
groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0])
# Combine channels and add them to the new instance
for this_group, this_group_dict in groups.items():
new_ch_names.append(this_group)
new_ch_types.append(this_group_dict['ch_type'])
this_picks = this_group_dict['picks']
this_data = np.take(inst_data, this_picks, axis=ch_axis)
new_data.append(method(this_data))
new_data = np.swapaxes(new_data, 0, ch_axis)
info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names,
ch_types=new_ch_types)
if isinstance(inst, BaseRaw):
combined_inst = RawArray(new_data, info, first_samp=inst.first_samp,
verbose=inst.verbose)
elif isinstance(inst, BaseEpochs):
combined_inst = EpochsArray(new_data, info, events=inst.events,
tmin=inst.times[0], verbose=inst.verbose)
elif isinstance(inst, Evoked):
combined_inst = EvokedArray(new_data, info, tmin=inst.times[0],
verbose=inst.verbose)
return combined_inst
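# Illustrative usage sketch (not part of the library code): ``raw`` is a
# hypothetical Raw instance and the channel indices are placeholders.
#
#     rois = dict(frontal=[0, 1, 2], parietal=[3, 4, 5])
#     combined = combine_channels(raw, rois, method='mean')
#     combined_med = combine_channels(raw, rois,
#                                     method=lambda d: np.median(d, axis=0))
#
# For Raw/Evoked the callable receives data of shape (n_channels, n_times),
# so axis=0 collapses channels; for Epochs use axis=1 (see docstring above).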
# NeuroMag channel groupings
_SELECTIONS = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal',
'Right-parietal', 'Left-occipital', 'Right-occipital',
'Left-frontal', 'Right-frontal']
_EEG_SELECTIONS = ['EEG 1-32', 'EEG 33-64', 'EEG 65-96', 'EEG 97-128']
def _divide_to_regions(info, add_stim=True):
"""Divide channels to regions by positions."""
from scipy.stats import zscore
picks = _pick_data_channels(info, exclude=[])
chs_in_lobe = len(picks) // 4
pos = np.array([ch['loc'][:3] for ch in info['chs']])
x, y, z = pos.T
frontal = picks[np.argsort(y[picks])[-chs_in_lobe:]]
picks = np.setdiff1d(picks, frontal)
occipital = picks[np.argsort(y[picks])[:chs_in_lobe]]
picks = np.setdiff1d(picks, occipital)
temporal = picks[np.argsort(z[picks])[:chs_in_lobe]]
picks = np.setdiff1d(picks, temporal)
lt, rt = _divide_side(temporal, x)
lf, rf = _divide_side(frontal, x)
lo, ro = _divide_side(occipital, x)
lp, rp = _divide_side(picks, x) # Parietal lobe from the remaining picks.
# Because of the way the sides are divided, there may be outliers in the
# temporal lobes. Here we switch the sides for these outliers. For other
# lobes it is not a big problem because of the vicinity of the lobes.
with np.errstate(invalid='ignore'): # invalid division, greater compare
zs = np.abs(zscore(x[rt]))
outliers = np.array(rt)[np.where(zs > 2.)[0]]
rt = list(np.setdiff1d(rt, outliers))
with np.errstate(invalid='ignore'): # invalid division, greater compare
zs = np.abs(zscore(x[lt]))
outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.)[0]]))
lt = list(np.setdiff1d(lt, outliers))
l_mean = np.mean(x[lt])
r_mean = np.mean(x[rt])
for outlier in outliers:
if abs(l_mean - x[outlier]) < abs(r_mean - x[outlier]):
lt.append(outlier)
else:
rt.append(outlier)
if add_stim:
stim_ch = _get_stim_channel(None, info, raise_error=False)
if len(stim_ch) > 0:
for region in [lf, rf, lo, ro, lp, rp, lt, rt]:
region.append(info['ch_names'].index(stim_ch[0]))
return OrderedDict([('Left-frontal', lf), ('Right-frontal', rf),
('Left-parietal', lp), ('Right-parietal', rp),
('Left-occipital', lo), ('Right-occipital', ro),
('Left-temporal', lt), ('Right-temporal', rt)])
def _divide_side(lobe, x):
"""Make a separation between left and right lobe evenly."""
lobe = np.asarray(lobe)
median = np.median(x[lobe])
left = lobe[np.where(x[lobe] < median)[0]]
right = lobe[np.where(x[lobe] > median)[0]]
medians = np.where(x[lobe] == median)[0]
left = np.sort(np.concatenate([left, lobe[medians[1::2]]]))
right = np.sort(np.concatenate([right, lobe[medians[::2]]]))
return list(left), list(right)
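# Minimal worked example (shown as comments only): four picks with x-positions
# [-0.05, -0.01, 0.02, 0.06] have median x == 0.005, so the two leftmost picks
# form the left side and the two rightmost the right side.
#
#     _divide_side(np.array([0, 1, 2, 3]),
#                  np.array([-0.05, -0.01, 0.02, 0.06]))
#     # -> ([0, 1], [2, 3])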
@verbose
def read_vectorview_selection(name, fname=None, info=None, verbose=None):
"""Read Neuromag Vector View channel selection from a file.
Parameters
----------
name : str | list of str
Name of the selection. If a list, the selections are combined.
Supported selections are: ``'Vertex'``, ``'Left-temporal'``,
``'Right-temporal'``, ``'Left-parietal'``, ``'Right-parietal'``,
``'Left-occipital'``, ``'Right-occipital'``, ``'Left-frontal'`` and
``'Right-frontal'``. Selections can also be matched and combined by
        specifying common substrings. For example, ``name='temporal'`` will
produce a combination of ``'Left-temporal'`` and ``'Right-temporal'``.
fname : str
Filename of the selection file (if ``None``, built-in selections are
used).
info : instance of Info
        Measurement info, which will be used to determine the spacing
of channel names to return, e.g. ``'MEG 0111'`` for old Neuromag
systems and ``'MEG0111'`` for new ones.
%(verbose)s
Returns
-------
sel : list of str
List with channel names in the selection.
"""
# convert name to list of string
if not isinstance(name, (list, tuple)):
name = [name]
if isinstance(info, Info):
picks = pick_types(info, meg=True, exclude=())
if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]:
spacing = 'new'
else:
spacing = 'old'
elif info is not None:
raise TypeError('info must be an instance of Info or None, not %s'
% (type(info),))
else: # info is None
spacing = 'old'
# use built-in selections by default
if fname is None:
fname = op.join(op.dirname(__file__), '..', 'data', 'mne_analyze.sel')
fname = _check_fname(fname, must_exist=True, overwrite='read')
# use this to make sure we find at least one match for each name
name_found = {n: False for n in name}
with open(fname, 'r') as fid:
sel = []
for line in fid:
line = line.strip()
# skip blank lines and comments
if len(line) == 0 or line[0] == '#':
continue
# get the name of the selection in the file
pos = line.find(':')
if pos < 0:
logger.info('":" delimiter not found in selections file, '
'skipping line')
continue
sel_name_file = line[:pos]
# search for substring match with name provided
for n in name:
if sel_name_file.find(n) >= 0:
sel.extend(line[pos + 1:].split('|'))
name_found[n] = True
break
# make sure we found at least one match for each name
for n, found in name_found.items():
if not found:
raise ValueError('No match for selection name "%s" found' % n)
# make the selection a sorted list with unique elements
sel = list(set(sel))
sel.sort()
if spacing == 'new': # "new" or "old" by now, "old" is default
sel = [s.replace('MEG ', 'MEG') for s in sel]
return sel
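# Illustrative usage sketch (not part of the library code): read built-in
# Vectorview selections; ``raw_info`` is a hypothetical Info used only to pick
# between 'MEG 0111'-style and 'MEG0111'-style channel names.
#
#     sel = read_vectorview_selection('Left-temporal')
#     sel = read_vectorview_selection(['Left-temporal', 'Right-temporal'])
#     sel = read_vectorview_selection('temporal', info=raw_info)  # substring match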
| bsd-3-clause |
bnaul/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 15 | 2260 | """Test the rcv1 loader, if the data is available,
or if specifically requested via environment variable
(e.g. for the Travis cron job)."""
import scipy.sparse as sp
import numpy as np
from functools import partial
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
def test_fetch_rcv1(fetch_rcv1_fxt):
data1 = fetch_rcv1_fxt(shuffle=False)
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert sp.issparse(X1)
assert sp.issparse(Y1)
assert 60915113 == X1.data.size
assert 2606875 == Y1.data.size
# test shapes
assert (804414, 47236) == X1.shape
assert (804414, 103) == Y1.shape
assert (804414,) == s1.shape
assert 103 == len(cat_list)
# test ordering of categories
first_categories = ['C11', 'C12', 'C13', 'C14', 'C15', 'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert num == Y1[:, j].data.size
# test shuffling and subset
data2 = fetch_rcv1_fxt(shuffle=True, subset='train', random_state=77)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# test return_X_y option
fetch_func = partial(fetch_rcv1_fxt, shuffle=False, subset='train')
check_return_X_y(data2, fetch_func)
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
evanbiederstedt/RRBSfun | scripts/PDR_methyl_CLL_RRBS_cw154_StackA1.py | 1 | 2026 |
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os
os.chdir('/Users/evanbiederstedt/Downloads/RRBS_data_files')
# set glob subdirectory via cell batch
cll_cells1 = glob.glob("RRBS_cw154*")
for filename in cll_cells1:
df = pd.read_table(filename)
df = df.drop(['chr', 'start', 'strand', 'avgWeightedEnt', 'CpGEntropy', 'tss', 'tssDistance', 'genes', 'exons',
'introns', 'promoter', 'cgi',
'geneDensity', 'ctcfUpstream', 'ctcfDownstream','ctcfDensity', 'geneDistalRegulatoryModules',
'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance','3PrimeUTRDistance',
'5PrimeUTR', '5PrimeUTRDistance', 'firstExon','geneDistalRegulatoryModulesK562',
'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64','hypoInHues64Distance',
'genesDistance', 'exonsDistance', 'intronsDistance', 'promoterDistance', 'cgiDistance',
'ctcf', 'ctcfDistance', 'geneDistalRegulatoryModulesDistance', 'vistaEnhancersDistance', 'firstExonDistance'], axis=1)
num_bins2 = np.ceil(df['avgReadCpGs'].max()/1.25)
df['avgReadCpGs_binned'] = pd.cut(df['avgReadCpGs'], num_bins2, labels=False)
df['read_stack_ID'] = (df.avgReadCpGs_binned.shift(1) != df.avgReadCpGs_binned).astype(int).cumsum()
df['total_reads'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
df['avgReadCpGs'] = df['avgReadCpGs'].values.round(decimals=0)
df1 = df.groupby(['read_stack_ID', 'avgReadCpGs'])[['thisMeth', 'thisUnmeth', 'methReadCount', 'unmethReadCount', 'mixedReadCount', 'total_reads']].sum()
df1.reset_index(inplace=True)
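    # Per-stack summaries: 'methylation' is the methylated fraction of CpG
    # calls; 'PDR_per_stack' assumes the usual RRBS definition of PDR, i.e.
    # the proportion of discordant (mixed methylated/unmethylated) reads.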
df1["methylation"] = df1["thisMeth"]/(df1["thisMeth"]+df1["thisUnmeth"]) # corrected
df1["PDR_per_stack"] = df1["mixedReadCount"]/df1["total_reads"]
df1.to_csv(str("stacked_") + str(filename) +str(".csv"))
| mit |
jhaux/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 46 | 15782 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
    current_epoch: Integer, the number of epochs emitted so far.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
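# Minimal worked example (shown as comments only): an array of length 5 whose
# epoch started at index 2 (so epoch_end == 1). A batch of size 4 starting at
# index 4 wraps around the array and completes the first epoch.
#
#     _get_integer_indices_for_next_batch(
#         batch_indices_start=4, batch_size=4, epoch_end=1, array_length=5,
#         current_epoch=0, total_epochs=None)
#     # -> ([4, 0, 1, 2], 1)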
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
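# Behaviour sketch (shown as comments only): ``index_ph`` and ``value_ph``
# stand for TensorFlow placeholders and are hypothetical names. With an array
# of length 4 and batch_size 3, successive calls walk through the data and
# wrap around.
#
#     feed_fn = _ArrayFeedFn([index_ph, value_ph], np.arange(4) * 10, 3)
#     feed_fn()  # {index_ph: [0, 1, 2], value_ph: array([ 0, 10, 20])}
#     feed_fn()  # {index_ph: [3, 0, 1], value_ph: array([30,  0, 10])}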
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
          raise KeyError("key mismatch between dicts emitted by GenFun. "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
| apache-2.0 |
schreiberx/sweet | benchmarks_sphere/paper_jrn_parco_rexi_nonlinear/compare_wt_dt_vs_accuracy_galewsky_reprod_2020_03_21/postprocessing_consolidate_prog_div.py | 4 | 4735 | #! /usr/bin/env python3
import sys
import math
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
sys.path.append('../')
import pretty_plotting as pp
sys.path.pop()
mule_plotting_usetex(False)
groups = ['runtime.timestepping_method']
tagnames_y = [
'sphere_data_diff_prog_div.res_norm_l1',
'sphere_data_diff_prog_div.res_norm_l2',
'sphere_data_diff_prog_div.res_norm_linf',
]
j = JobsData('./job_bench_*', verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
print(key)
for tagname_y in tagnames_y:
params = []
params += [
{
'tagname_x': 'runtime.timestep_size',
'xlabel': "Timestep size (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Timestep size vs. error',
'xscale': 'log',
'yscale': 'log',
},
]
params += [
{
'tagname_x': 'output.simulation_benchmark_timings.main_timestepping',
'xlabel': "Wallclock time (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Wallclock time vs. error',
'xscale': 'log',
'yscale': 'log',
},
]
for param in params:
tagname_x = param['tagname_x']
xlabel = param['xlabel']
ylabel = param['ylabel']
title = param['title']
xscale = param['xscale']
yscale = param['yscale']
print("*"*80)
print("Processing tag "+tagname_x)
print("*"*80)
if True:
"""
Plotting format
"""
# Filter out errors beyond this value!
def data_filter(x, y, jobdata):
if y == None:
return True
x = float(x)
y = float(y)
if math.isnan(y):
return True
if 'prog_h' in tagname_y:
if 'l1' in tagname_y:
if y > 1e1:
print("Sorting out L1 data "+str(y))
return True
elif 'l2' in tagname_y:
if y > 1e1:
print("Sorting out L2 data "+str(y))
return True
elif 'linf' in tagname_y:
if y > 1e2:
print("Sorting out Linf data "+str(y))
return True
else:
raise Exception("Unknown y tag "+tagname_y)
elif 'prog_div' in tagname_y:
if 'l1' in tagname_y:
if y > 1e1:
print("Sorting out L1 data "+str(y))
return True
elif 'l2' in tagname_y:
if y > 1e1:
print("Sorting out L2 data "+str(y))
return True
elif 'linf' in tagname_y:
if y > 1e2:
print("Sorting out Linf data "+str(y))
return True
else:
raise Exception("Unknown y tag "+tagname_y)
else:
print("TODO")
return False
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y,
data_filter = data_filter
)
fileid = "output_plotting_"+tagname_x.replace('.', '-').replace('_', '-')+"_vs_"+tagname_y.replace('.', '-').replace('_', '-')
if True:
#
# Proper naming and sorting of each label
#
# new data dictionary
data_new = {}
for key, data in d.data.items():
# generate nice tex label
#data['label'] = pp.get_pretty_name(key)
data['label'] = key #pp.get_pretty_name(key)
key_new = pp.get_pretty_name_order(key)+'_'+key
# copy data
data_new[key_new] = copy.copy(data)
# Copy back new data table
d.data = data_new
p = Plotting_ScatteredData()
def fun(p):
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
plt.tick_params(axis='x', which='minor')
p.ax.xaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.xaxis.get_minor_ticks():
tick.label.set_fontsize(8)
plt.tick_params(axis='y', which='minor')
p.ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_major_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.yaxis.get_minor_ticks():
tick.label.set_fontsize(6)
annotate_text_template = "{:.1f} / {:.3f}"
p.plot(
data_plotting = d.get_data_float(),
xlabel = xlabel,
ylabel = ylabel,
title = title,
xscale = xscale,
yscale = yscale,
#annotate = True,
#annotate_each_nth_value = 3,
#annotate_fontsize = 6,
#annotate_text_template = annotate_text_template,
legend_fontsize = 8,
grid = True,
outfile = fileid+".pdf",
lambda_fun = fun,
)
print("Data plotting:")
d.print()
d.write(fileid+".csv")
print("Info:")
print(" NaN: Errors in simulations")
print(" None: No data available")
| mit |
LiaoPan/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
KasperPRasmussen/bokeh | examples/charts/file/donut_multi.py | 6 | 1394 | from bokeh.charts import Donut, show, output_file, vplot
from bokeh.sampledata.autompg import autompg
import pandas as pd
# simple examples with inferred meaning
# implied index
d1 = Donut([2, 4, 5, 2, 8])
# explicit index
d2 = Donut(pd.Series([2, 4, 5, 2, 8], index=['a', 'b', 'c', 'd', 'e']))
# given a categorical series of data with no aggregation
d3 = Donut(autompg.cyl.astype(str))
# given a series of aggregated values indexed by category
d4 = Donut(autompg.groupby('cyl').displ.mean())
# given a series of aggregated values with a two-level categorical index
d5 = Donut(autompg.groupby(['cyl', 'origin']).displ.mean(),
hover_text='mean')
# no values specified
d6 = Donut(autompg, label='cyl', agg='count')
# explicit examples
d7 = Donut(autompg, label='cyl',
values='displ', agg='mean')
# nested donut chart for the provided labels, with colors assigned
# by the first level
d8 = Donut(autompg, label=['cyl', 'origin'],
values='displ', agg='mean')
# show altering the spacing in levels
d9 = Donut(autompg, label=['cyl', 'origin'],
values='displ', agg='mean', level_spacing=0.15)
# show altering the spacing in levels
d10 = Donut(autompg, label=['cyl', 'origin'],
values='displ', agg='mean', level_spacing=[0.8, 0.3])
output_file("donut_multi.html", title="donut_multi.py example")
show(vplot(d1, d2, d3, d4, d5, d6, d7, d8, d9, d10))
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/utils/__init__.py | 132 | 14185 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
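# Minimal worked example (shown as comments only; assumes
# ``import scipy.sparse as sp``): boolean masks are converted to integer
# indices only for sparse inputs, which cannot be indexed by boolean masks.
#
#     mask = np.array([True, False, True])
#     safe_mask(np.zeros((3, 2)), mask)                 # boolean mask unchanged
#     safe_mask(sp.csr_matrix(np.zeros((3, 2))), mask)  # -> array([0, 2])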
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
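# Minimal worked example (shown as comments only): the same call works on
# plain lists and on numpy arrays (and on pandas objects via ``.iloc``).
#
#     safe_indexing([10, 20, 30, 40], [0, 2])                      # -> [10, 30]
#     safe_indexing(np.array([10, 20, 30, 40]), np.array([0, 2]))  # -> array([10, 30])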
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
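# Hedged usage sketch (added for illustration; not part of the original
# module). It shows that safe_sqr squares a dense array out of place by
# default and squares only the stored values of a sparse matrix. The helper
# name below is hypothetical.
def _demo_safe_sqr():
    import numpy as np
    from scipy.sparse import csr_matrix
    dense = safe_sqr(np.array([[-2., 3.]]))     # -> array([[ 4.,  9.]])
    sparse = safe_sqr(csr_matrix([[0., -2.]]))  # stays sparse
    return dense, sparse.toarray()              # -> array([[ 0.,  4.]])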
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
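# Illustrative note (not in the original source): passing n_samples clips the
# final slice so that sparse-matrix row indexing never runs past the end.
# >>> list(gen_even_slices(12, 3, n_samples=10))
# [slice(0, 4, None), slice(4, 8, None), slice(8, 10, None)]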
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
466152112/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
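# (Illustrative aside, not part of the original example: reading one image
# file instead of the bundled dataset could look like the commented line
# below; the filename is hypothetical and plt.imread is the pyplot spelling
# of pylab.imread.)
# digit = plt.imread("digit_8x8.png")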
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
rs2/pandas | pandas/tests/io/parser/test_quoting.py | 2 | 5078 | """
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
from io import StringIO
import pytest
from pandas.errors import ParserError
from pandas import DataFrame
import pandas._testing as tm
@pytest.mark.parametrize(
"kwargs,msg",
[
(dict(quotechar="foo"), '"quotechar" must be a(n)? 1-character string'),
(
dict(quotechar=None, quoting=csv.QUOTE_MINIMAL),
"quotechar must be set if quoting enabled",
),
(dict(quotechar=2), '"quotechar" must be string, not int'),
],
)
def test_bad_quote_char(all_parsers, kwargs, msg):
data = "1,2,3"
parser = all_parsers
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
@pytest.mark.parametrize(
"quoting,msg",
[
("foo", '"quoting" must be an integer'),
(5, 'bad "quoting" value'), # quoting must be in the range [0, 3]
],
)
def test_bad_quoting(all_parsers, quoting, msg):
data = "1,2,3"
parser = all_parsers
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), quoting=quoting)
def test_quote_char_basic(all_parsers):
parser = all_parsers
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quote_char", ["~", "*", "%", "$", "@", "P"])
def test_quote_char_various(all_parsers, quote_char):
parser = all_parsers
expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"])
data = 'a,b,c\n1,2,"cat"'
new_data = data.replace('"', quote_char)
result = parser.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])
@pytest.mark.parametrize("quote_char", ["", None])
def test_null_quote_char(all_parsers, quoting, quote_char):
kwargs = dict(quotechar=quote_char, quoting=quoting)
data = "a,b,c\n1,2,3"
parser = all_parsers
if quoting != csv.QUOTE_NONE:
# Sanity checking.
msg = "quotechar must be set if quoting enabled"
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,exp_data",
[
(dict(), [[1, 2, "foo"]]), # Test default.
# QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.
(dict(quotechar='"', quoting=csv.QUOTE_MINIMAL), [[1, 2, "foo"]]),
# QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.
(dict(quotechar='"', quoting=csv.QUOTE_ALL), [[1, 2, "foo"]]),
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone.
(dict(quotechar='"', quoting=csv.QUOTE_NONE), [[1, 2, '"foo"']]),
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
(dict(quotechar='"', quoting=csv.QUOTE_NONNUMERIC), [[1.0, 2.0, "foo"]]),
],
)
def test_quoting_various(all_parsers, kwargs, exp_data):
data = '1,2,"foo"'
parser = all_parsers
columns = ["a", "b", "c"]
result = parser.read_csv(StringIO(data), names=columns, **kwargs)
expected = DataFrame(exp_data, columns=columns)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"doublequote,exp_data", [(True, [[3, '4 " 5']]), (False, [[3, '4 " 5"']])]
)
def test_double_quote(all_parsers, doublequote, exp_data):
parser = all_parsers
data = 'a,b\n3,"4 "" 5"'
result = parser.read_csv(StringIO(data), quotechar='"', doublequote=doublequote)
expected = DataFrame(exp_data, columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quotechar", ['"', "\u0001"])
def test_quotechar_unicode(all_parsers, quotechar):
# see gh-14477
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(StringIO(data), quotechar=quotechar)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("balanced", [True, False])
def test_unbalanced_quoting(all_parsers, balanced):
# see gh-22789.
parser = all_parsers
data = 'a,b,c\n1,2,"3'
if balanced:
# Re-balance the quoting and read in without errors.
expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data + '"'))
tm.assert_frame_equal(result, expected)
else:
msg = (
"EOF inside string starting at row 1"
if parser.engine == "c"
else "unexpected end of data"
)
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
| bsd-3-clause |
lbishal/scikit-learn | examples/svm/plot_oneclass.py | 80 | 2338 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
altairpearl/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
pythonvietnam/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 76 | 45197 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)  # np.sort returns a sorted copy
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/MHD/CG/SmoothSol/MHD.py | 1 | 8725 | #!/usr/bin/python
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import BiLinearForms as forms
import DirectOperations as Direct
import MatrixOperations as MO
m = 5
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'no'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
nn = 2**(xx )
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
mesh = RectangleMesh(0, 0, 1, 1, nn, nn,'crossed')
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
parameters['reorder_dofs_serial'] = False
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0 =Expression(("sin(x[1])*exp(x[0])","cos(x[1])*exp(x[0])"))
p0 = Expression("sin(x[0])*cos(x[1])")
b0 = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)"))
r0 = Expression("x[1]*(x[1]-1)*x[0]*(x[0]-1)")
bcu = DirichletBC(W.sub(0),u0, boundary)
bcp = DirichletBC(W.sub(1),p0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
bc = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1e2
Mu_m = 1e2
MU = 1
Laplacian = -MU*Expression(("0","0"))
Advection = Expression(("pow(exp(x[0]),2)","0"))
gradPres = Expression(("cos(x[1])*cos(x[0])","-sin(x[1])*sin(x[0])"))
b_bar = Expression(("3*pow(x[0],2)-2*x[0]-3*pow(x[1],2)+2*x[1]"))
NS_couple = -kappa*b_bar*Expression(("-x[0]*x[0]*(x[0]-1)","x[1]*x[1]*(x[1]-1)"))
F_NS = Laplacian+Advection+gradPres+NS_couple
CurlCurl = kappa*Mu_m *Expression(("-6*x[1]+2","-6*x[0]+2"))
gradR = Expression(("(2*x[0]-1)*x[1]*(x[1]-1)","(2*x[1]-1)*x[0]*(x[0]-1)"))
M_couple = -kappa*Expression(("pow(x[0],2)*exp(x[0])*cos(x[1])*(x[0] - 1) - 2*x[1]*exp(x[0])*cos(x[1])*(x[1] - 1) - pow(x[1],2)*exp(x[0])*cos(x[1]) + pow(x[1],2)*exp(x[0])*sin(x[1])*(x[1] - 1)","pow(x[1],2)*exp(x[0])*cos(x[1])*(x[1] - 1) - 2*x[0]*exp(x[0])*sin(x[1])*(x[0] - 1) - pow(x[0],2)*exp(x[0])*sin(x[1]) - pow(x[0],2)*exp(x[0])*sin(x[1])*(x[0] - 1)"))
F_M = CurlCurl+gradR +M_couple
params = [kappa,Mu_m,MU]
u_k,b_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[gradPres,F_M],params,Neumann=Expression(("0","0")))
if (split == "Linear"):
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,split)
a = ns+maxwell+CoupleTerm
L = Lmaxwell+Lns
elif (split == "NoneLinear"):
linear, Nlinear, RHS = MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,split)
a = Nlinear
parameters['linear_algebra_backend'] = 'uBLAS'
epsu = 1.0 # error measure ||u-u_k||
epsb = 1.0
tol = 1.0E-8 # tolerance
iter = 0 # iteration counter
maxiter = 10 # max no of iterations allowed
SolutionTime = 0
while epsu > tol and iter < maxiter:
iter += 1
uu = Function(W)
AA, bb = assemble_system(maxwell+ns+CoupleTerm, Lmaxwell + Lns, bc)
VelPres = Velocitydim[xx-1][0] +Pressuredim[xx-1][0]
A,b,x = Direct.RemoveRowCol(AA,bb,VelPres)
ksp = PETSc.KSP().create()
pc = PETSc.PC().create()
ksp.setOperators(A)
ksp.setFromOptions()
print '\n\n\nSolving with:', ksp.getType()
tic()
ksp.solve(b, x)
time = toc()
print time
SolutionTime = SolutionTime +time
u_k,b_k,epsu, epsb = Direct.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
SolTime[xx-1] = SolutionTime/iter
ue =Expression(("sin(x[1])*exp(x[0])","cos(x[1])*exp(x[0])"))
pe = Expression("sin(x[0])*cos(x[1])")
be = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)"))
re = Expression("x[1]*(x[1]-1)*x[0]*(x[0]-1)")
ExactSolution = [ue,pe,be,re]
errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Direct.Errors(x,mesh,FSpaces,ExactSolution,order,dim)
if xx == 1:
l2uorder[xx-1] = 0
else:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd
print "\n\n Velocity convergence"
VelocityTitles = ["Total DoF","V DoF","Soln Time","V-L2","L2-order","V-H1","H1-order"]
VelocityValues = np.concatenate((Wdim,Velocitydim,SolTime,errL2u,l2uorder,errH1u,H1uorder),axis=1)
VelocityTable= pd.DataFrame(VelocityValues, columns = VelocityTitles)
pd.set_option('precision',3)
VelocityTable = MO.PandasFormat(VelocityTable,"V-L2","%2.4e")
VelocityTable = MO.PandasFormat(VelocityTable,'V-H1',"%2.4e")
VelocityTable = MO.PandasFormat(VelocityTable,"H1-order","%1.2f")
VelocityTable = MO.PandasFormat(VelocityTable,'L2-order',"%1.2f")
print VelocityTable
print "\n\n Pressure convergence"
PressureTitles = ["Total DoF","P DoF","Soln Time","P-L2","L2-order"]
PressureValues = np.concatenate((Wdim,Pressuredim,SolTime,errL2p,l2porder),axis=1)
PressureTable= pd.DataFrame(PressureValues, columns = PressureTitles)
pd.set_option('precision',3)
PressureTable = MO.PandasFormat(PressureTable,"P-L2","%2.4e")
PressureTable = MO.PandasFormat(PressureTable,'L2-order',"%1.2f")
print PressureTable
print "\n\n Magnetic convergence"
MagneticTitles = ["Total DoF","B DoF","Soln Time","B-L2","L2-order","B-Curl","HCurl-order"]
MagneticValues = np.concatenate((Wdim,Magneticdim,SolTime,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
print MagneticTable
print "\n\n Lagrange convergence"
LagrangeTitles = ["Total DoF","R DoF","Soln Time","R-L2","L2-order","R-H1","H1-order"]
LagrangeValues = np.concatenate((Wdim,Lagrangedim,SolTime,errL2r,l2rorder,errH1r,H1rorder),axis=1)
LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
pd.set_option('precision',3)
LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
print LagrangeTable
if (ShowResultPlots == 'yes'):
plot(ua)
plot(interpolate(ue,Velocity))
plot(pp)
plot(interpolate(pe,Pressure))
plot(ba)
plot(interpolate(be,Magnetic))
plot(ra)
plot(interpolate(re,Lagrange))
interactive()
| mit |
bosmanoglu/adore-doris | lib/python/basic/graphics/graphics.py | 1 | 34676 | import numpy as np
import pylab as P
import basic
import scipy
from IPython.core.debugger import set_trace
def matshowClick(A, value=True, vmin=None, vmax=None):
def onclick(event):
try:
y=np.round(event.xdata);
except:
return
x=np.round(event.ydata);
if value==True:
print (x,y, A[x,y])
else:
print (x,y)
s=basic.nonaninf(A).std();
m=basic.nonaninf(A).mean();
if vmin is None:
vmin=m-2*s
if vmax is None:
vmax=m+2*s
fig=P.figure();
ax=fig.add_subplot(111);ax.matshow(A, vmin=vmin, vmax=vmax);
fig.canvas.mpl_connect('button_press_event', onclick);
return fig
def imshowsc(A,n=2):
    """
    figureHandle=imshowsc(array2d, n=2);
    Automatically rescales the color limits to n sigma of the data inside the
    current zoom window whenever the axes are clicked.
    """
    def onclick(event):
        limits=ax.axis();
        x0,x1=int(min(limits[0],limits[1])),int(max(limits[0],limits[1]))
        y0,y1=int(min(limits[2],limits[3])),int(max(limits[2],limits[3]))
        B=A[y0:y1,x0:x1]
        s=B[~np.isnan(B)].std();
        im.set_clim(-n*s,n*s);
        fig.canvas.draw();
    fig=P.figure();
    ax=fig.add_subplot(111);im=ax.matshow(A);
    fig.canvas.mpl_connect('button_press_event', onclick);
    return fig
def clickScat(array2d, array3d, xScat=None, xerror3d=None, yerror3d=None, array3d2=None, xerror3d2=None, yerror3d2=None, fn=None, xMap=None, yMap=None,
modelError=False, ylimScat=None):
"""
figureHandles=clickScat(array2d, array3d, xScat=None, xerror3d=None, yerror3d=None, array3d2=None, xerror3d2=None, yerror3d2=None, fn=None, xMap=None, yMap=None):
    xScat: x-axis variables for the scatter plot. Has to have the same length as the last dimension of array3d (i.e. array3d.shape[2]).
xerror3d: errorbars for x-axis. two sided.
fn:'annual'
"""
import insar
dateaxis=False;
if xScat is None:
xScat=np.r_[0:array3d.shape[2]];
elif isinstance(xScat[0], P.matplotlib.dates.datetime.date):
xScat=P.matplotlib.dates.date2num(xScat);
dateaxis=True;
def onclick(event):
P.figure(fh.number);
P.clf();
#ax = P.gca()
#inv = ax.transData.inverted()
#A=inv.transform((event.x, event.y))
#A[1]=np.int(np.round((1-A[1])*array2d.shape[1]))
#A[0]=np.int(np.round((A[0])*array2d.shape[0]))
try:
y=np.round(event.xdata);
except:
return
x=np.round(event.ydata);
#ARRAY MAPPING IS first axis y(rows) and second axis is cols (x)
if all(np.isnan(array3d[x, y,:])):
#if there are no points to plot (all nan) then return
return
#Plot second scatter data.
if array3d2 is not None:
if isinstance(array3d2, list):
if yerror3d is None:
w=np.ones(array3d[x, y,:].shape);
else:
w=basic.rescale(1./yerror3d[x,y,:], [1,2])
markers=['*','+','s','d','x','v','<','>','^']
m=0;
for arr in array3d2:
print ("%d, %d, %d" % (x,y,m))
P.scatter(xScat, arr[x, y,:], marker=markers[m]);
idx=~( np.isnan(arr[x, y,:]) | np.isnan(array3d[x, y,:]))
#c=insar.crosscorrelate(basic.nonan(w[idx]*arr[x, y,idx]),basic.nonan(w[idx]*array3d[x, y,idx]))
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w[idx]*arr[x, y,idx]), basic.nonan(w[idx]*array3d[x, y,idx]))
P.annotate(str("r2[%s]: %0.2f" % (markers[m],r_value)), (0,0.9-m*0.05), xycoords='axes fraction')
m=m+1;
else:
if xerror3d2 is None:
xerr=None;
else:
xerr=xerror3d2[x,y,:]
if yerror3d2 is None:
yerr=None;
else:
yerr=yerror3d2[x, y,:]
P.errorbar(xScat,array3d2[x, y,:], xerr=xerr, yerr=yerr, marker='*', fmt='o');
#Plot function result as scatter data.
p=None
if fn is not None:
if fn=='linear_amplitude_annual':
dataMask=~np.isnan(array3d[x, y,:])
p0=np.array([1,0,0,basic.nonan(array3d[x, y,:]).mean() ])
fitfun=lambda p: (p[0]+p[1]*xScat[dataMask]/365. )* np.cos(2*np.pi*xScat[dataMask]/365.+p[2]) + p[3]
xScat2=np.linspace(xScat.min(),xScat.max())
fitfun2=lambda p: (p[0]+p[1]*xScat2/365.) * np.cos(2*np.pi*xScat2/365.+p[2]) + p[3]
#errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
if yerror3d is None:
w=np.ones(array3d[x, y,:].shape);
else:
w=basic.rescale(1./yerror3d[x,y,:], [1,2])
errfun=lambda p: basic.nonan(w*array3d[x, y,:])-w[dataMask]*fitfun(p);
#p=scipy.optimize.fmin_powell(errfun, p0)
p=scipy.optimize.leastsq(errfun, p0);
p=p[0];
P.scatter(xScat[dataMask], fitfun(p), marker='^');
sortedxy= np.squeeze(np.dstack([xScat2, fitfun2(p)]));
sortedxy=sortedxy[sortedxy[:,0].argsort(),:]
P.plot(sortedxy[:,0], sortedxy[:,1]);
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w*array3d[x, y,:]),w[dataMask]*fitfun(p))
P.annotate(str("a0:%0.2f\na1:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" % (p[0], p[1], p[2], p[3], r_value**2.)), (0.8,0.8), xycoords='axes fraction')
elif fn=='quadratic_amplitude_annual':
dataMask=~np.isnan(array3d[x, y,:])
p0=np.array([1,0,0,0,basic.nonan(array3d[x, y,:]).mean() ])
fitfun=lambda p: (p[0]+p[1]*xScat[dataMask]/365.+p[2]*(xScat[dataMask]/365.)**2. )* np.cos(2*np.pi*xScat[dataMask]/365.+p[3]) + p[4]
xScat2=np.linspace(xScat.min(),xScat.max())
fitfun2=lambda p: (p[0]+p[1]*xScat2/365.+p[2]*(xScat2/365.)**2.) * np.cos(2*np.pi*xScat2/365.+p[3]) + p[4]
#errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
if yerror3d is None:
w=np.ones(array3d[x, y,:].shape);
else:
w=basic.rescale(1./yerror3d[x,y,:], [1,2])
errfun=lambda p: basic.nonan(w*array3d[x, y,:])-w[dataMask]*fitfun(p);
#p=scipy.optimize.fmin_powell(errfun, p0)
p=scipy.optimize.leastsq(errfun, p0);
p=p[0];
P.scatter(xScat[dataMask], fitfun(p), marker='^');
sortedxy= np.squeeze(np.dstack([xScat2, fitfun2(p)]));
sortedxy=sortedxy[sortedxy[:,0].argsort(),:]
P.plot(sortedxy[:,0], sortedxy[:,1]);
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w*array3d[x, y,:]),w[dataMask]*fitfun(p))
P.annotate(str("a0:%0.2f\na1:%0.2f\na2:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" % (p[0], p[1], p[2], p[3], p[4], r_value**2.)), (0.8,0.8), xycoords='axes fraction')
elif fn=='annual':
dataMask=~np.isnan(array3d[x, y,:])
p0=np.array([1,1,basic.nonan(array3d[x, y,:]).mean() ])
fitfun=lambda p: p[0]* np.cos(2*np.pi*xScat[dataMask]/365.+p[1]) + p[2]
xScat2=np.linspace(xScat.min(),xScat.max())
fitfun2=lambda p: p[0]* np.cos(2*np.pi*xScat2/365.+p[1]) + p[2]
#errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
if yerror3d is None:
w=np.ones(array3d[x, y,:].shape);
else:
w=basic.rescale(1./yerror3d[x,y,:], [1,2])
errfun=lambda p: basic.nonan(w*array3d[x, y,:])-w[dataMask]*fitfun(p);
#p=scipy.optimize.fmin_powell(errfun, p0)
p=scipy.optimize.leastsq(errfun, p0);
p=p[0];
P.scatter(xScat[dataMask], fitfun(p), marker='^');
sortedxy= np.squeeze(np.dstack([xScat2, fitfun2(p)]));
sortedxy=sortedxy[sortedxy[:,0].argsort(),:]
P.plot(sortedxy[:,0], sortedxy[:,1]);
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w*array3d[x, y,:]),w[dataMask]*fitfun(p))
P.annotate(str("amp:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" % (p[0], p[1], p[2], r_value**2.)), (0.8,0.8), xycoords='axes fraction')
else:
p=None
P.scatter(xScat, fn(xScat), marker='^');
#convert axis to date...
if dateaxis:
try:
P.figure(fh.number).axes[0].xaxis_date(tz=None)
P.figure(fh.number).autofmt_xdate()
except:
pass
#change x y to xMap, yMap
if yMap is not None:
xM=ya*x+yb;
else:
xM=x;
if xMap is not None:
yM=xa*(y)+xb;
else:
yM=y;
#x and y are flipped in the try/except block above. So Flip again.
#if p is not None:
# P.title("x,y,[]: " + str(yM) + ", " + str(xM) + ', ' + str(p) )
#else:
P.title("x,y,z,z.std: " + str(yM) + ", " + str(xM) + ', ' + str(array2d[x,y]) +', ' + str(np.std(basic.nonan(array3d[x, y,:]))) )
# rotate and align the tick labels so they look better
#P.figure(fh.number).autofmt_xdate()
# use a more precise date string for the x axis locations in the
# toolbar
#P.gca().fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
if xerror3d is None:
xerr=None;
else:
xerr=xerror3d[x,y,:]
if yerror3d is None:
yerr=None;
else:
yerr=yerror3d[x, y,:]
if modelError:
yerr=yerror3d[x, y,:]
yerr[dataMask]=errfun(p)
P.errorbar(xScat,array3d[x, y,:], xerr=xerr, yerr=yerr, fmt='ro');
if ylimScat is not None:
P.ylim(ylimScat);
##################################
## END OF PLOTTING
##################################
s=array2d[~np.isnan(array2d)].std();
m=array2d[~np.isnan(array2d)].mean();
fig=P.figure();ax=fig.add_subplot(111);ax.matshow(array2d, vmin=m-s, vmax=m+s);
#fig=P.figure();ax=fig.add_subplot(111);ax.matshow(basic.wrapToInt(array2d, s), vmin=-s, vmax=s);
if xMap is not None:
ticks=ax.get_xticks();
(xa,xb)=np.polyfit(np.r_[0:len(xMap)],xMap,1)
ax.set_xticklabels(np.around(xa*ticks+xb,4));
if yMap is not None:
ticks=ax.get_yticks();
(ya,yb)=np.polyfit(np.r_[len(yMap):0:-1],yMap,1)
ax.set_yticklabels(np.around(ya*ticks+yb,4));
#P.colorbar();
cax,kw=P.matplotlib.colorbar.make_axes(ax,orientation='vertical')
    P.matplotlib.colorbar.ColorbarBase(cax, cmap=P.cm.jet,
                       norm=P.Normalize(vmin=m-s,vmax=m+s),
                       orientation='vertical')
fh=P.figure(); #should be accessible in child function?
fig.canvas.mpl_connect('button_press_event', onclick);
return (fig,fh)
def linkedImShow(im1, im2, **kwargs):
"""(ax1,ax2)=linkedImShow(im1, im2, **imshow_kwargs)
"""
P.figure();
ax1 = P.subplot(111)
P.figure();
ax2 = P.subplot(111, sharex=ax1, sharey=ax1)
#ax1.set_adjustable("box-forced")
#ax2.set_adjustable("box-forced")
#arr1 = np.arange(100).reshape((10, 10))
ax1.imshow(im1, **kwargs)
#arr2 = np.arange(100, 0, -1).reshape((10, 10))
ax2.imshow(im2, **kwargs)
return (ax1, ax2)
def linkaxes(ax1, ax2):
ax2.set(sharex=ax1, sharey=ax1)
def linkfigures(fg1, fg2):
ax1=fg1.get_axes()[0]
ax2=fg2.get_axes()[0]
ax2.set(sharex=ax1, sharey=ax1)
def frankotchellappa(dzdx,dzdy):
'''frankotchellappa(dzdx,dzdy):
'''
dS=dzdx.shape;
cols=dS[1];
rows=dS[0];
[wx, wy] = np.meshgrid((np.r_[1:cols+1]-(np.fix(cols/2)+1))/(cols-np.mod(cols,2)),(np.r_[1:rows+1]-(np.fix(rows/2)+1))/(rows-np.mod(rows,2)));
wx = np.fft.ifftshift(wx); wy = np.fft.ifftshift(wy);
DZDX = np.fft.fft2(dzdx);
DZDY = np.fft.fft2(dzdy);
eps=np.finfo(np.double).eps;
Z = (-1j*wx*DZDX -1j*wy*DZDY)/(wx**2 + wy**2 + eps)
z = np.fft.ifft2(Z).real;
return z
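# Usage sketch (illustrative, not part of the original module): frankotchellappa
# integrates a gradient field back into a surface, so a round trip through
# np.gradient should approximately recover the input up to an additive constant.
# `surface` below is hypothetical test data; the gradient ordering mirrors the
# convention used in frankotchellappaiter above.
#
#   surface = np.random.rand(64, 64)
#   dzdx, dzdy = np.gradient(surface)
#   z = frankotchellappa(dzdx, dzdy)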
def matscale(array, m=None, s=None, **kwargs):
''' matscale(array, m=None, s=None, **kwargs)
Ex: matscale(kum.topoA[:,:,0])
'''
if not m:
m=array.mean()
if not s:
s=array.std()
return P.matshow(array, vmin=m-s, vmax=m+s, **kwargs);
def matshowN(array, axis=None, titles=None, matshow_kw=dict(), subplot_kw=dict(frame_on=False), **kwargs):
''' matshowN(array, axis=None, titles=None, subplot_kw=dict(frame_on=False), **kwargs)
Ex: f=matshowN(angle(kum.iG.cint[:]))
f.tight_layout()
basic.graphics.matshowN((residual/stdpha)**2., matshow_kw={'vmin':0, 'vmax':1})
axis: If not specified, uses the axis with minimum elements.
axis=argmin(array.shape)
'''
if axis is None:
axis=np.argmin(array.shape);
num=array.shape[axis];
lw=int(np.ceil(np.sqrt(num)));
f,axarr=P.subplots(lw,lw,sharex='col',sharey='row', subplot_kw=subplot_kw, **kwargs)
array=np.rollaxis(array, axis, 0);
k=0;
for ax in axarr.ravel():
if k >= num:
ax.set_axis_off();
continue
ax.matshow(array[k,:,:], **matshow_kw);
if titles is None:
ax.set_title(str('%d'%k));ax.set_axis_off();
else:
ax.set_title(titles[k]);ax.set_axis_off();
k=k+1;
return f;
def frankotchellappaiter(dzdx,dzdy,weight=None, threshold=0.1, maxiter=10):
'''frankotchellappaiter(dzdx,dzdy):
'''
dS=dzdx.shape;
if weight is None:
weight=np.ones(dS);
cols=dS[1];
rows=dS[0];
[wx, wy] = np.meshgrid((np.r_[1:cols+1]-(np.fix(cols/2)+1))/(cols-np.mod(cols,2)),(np.r_[1:rows+1]-(np.fix(rows/2)+1))/(rows-np.mod(rows,2)));
wx = np.fft.ifftshift(wx); wy = np.fft.ifftshift(wy);
dx=dzdx.copy()
dy=dzdy.copy()
nfilt=9.0
corrFilter=np.array([[1,1,1],[1,1,1],[1,1,1]])/nfilt #http://docs.scipy.org/doc/scipy-0.7.x/reference/tutorial/ndimage.html
for k in xrange(maxiter):
dx=scipy.ndimage.filters.correlate(dx,corrFilter,mode='nearest')
dy=scipy.ndimage.filters.correlate(dy,corrFilter,mode='nearest')
z=frankotchellappa(dx,dy)
gx,gy=np.gradient(z);
rx=(weight*(gx/dzdx)).mean()
ry=(weight*(gy/dzdy)).mean()
dx=dzdx/rx
dy=dzdy/ry
z=frankotchellappa(dx,dy)
gx,gy=np.gradient(z);
c = weight*np.sqrt((dzdx-gx)**2+(dzdy-gy)**2)
if c.mean()<threshold:
print("cost: ", c.mean())
break
else:
print("cost: ", c.mean())
dx=dx+scipy.ndimage.filters.correlate(dzdx-gx,corrFilter, mode='nearest')
dy=dy+scipy.ndimage.filters.correlate(dzdy-gy,corrFilter, mode='nearest')
return z
def frankotchellappaiter2(dzdx,dzdy):
'''frankotchellappaiter2(dzdx,dzdy):
Ex:
dzdx,dzdy=insar.cpxgradient(pf)
uw=basic.plot.frankotchellappaiter2(dzdy,dzdx);
matshow(angle(pf*exp(-1j*uwxy)));
'''
import kabum
z=kabum.frankotchellappa(dzdx,dzdy);
gy,gx=np.gradient(z);
rx=basic.nonaninf(gx/dzdx).mean()
ry=basic.nonaninf(gy/dzdy).mean()
z=kabum.frankotchellappa(dzdx/rx,dzdy/ry);
gy,gx=np.gradient(z);
return z
def histogram_matching(inputArr, histogramArr=None, bins=100, zpdf=None, zbins=None):
"""histogram_matching(inputArr, histogramArr=None, bins=100, zpdf=None, zbins=None)
"""
if (histogramArr is None) and (zpdf is None):
print('Error: histogramArr or zpdf has to be specified')
return
if (bins<=1) and (zbins is None):
print('Skipping histogram matching: bins<=1')
return inputArr
    if (zbins is not None) and (len(zbins)<=1):
print('Skipping histogram matching: len(zbins)<=1')
return inputArr
dS=inputArr.shape
lenS=inputArr.size
s=basic.nonan(inputArr.ravel())
#Limit matching to majority of the pixels.. We don't want a long trail.
sm=s.mean()
ss=s.std()
sbins=P.np.linspace(sm-3*ss,sm+3*ss,bins+1);
spdf, sbins=P.np.histogram(s, sbins)
spdf=spdf/P.np.double(sum(spdf))
sk= P.np.cumsum(spdf) #spdf * P.np.triu(P.np.ones(dS)) #CDF
#Histogram to be matched
if zpdf is None:
dZ=histogramArr.shape
lenZ=histogramArr.size
z=basic.nonan(histogramArr.ravel())
zm=z.mean()
zs=z.std()
zbins=P.np.linspace(zm-3*zs,zm+3*zs,bins+1);
zpdf, zbins=P.np.histogram(z, zbins)
else:
#make zpdf match the length of bins
zpdf=np.interp(sbins, np.linspace(zbins[0], zbins[-1], zpdf.shape[0]), zpdf);
zbins=sbins #zbins no longer needed?.
zpdf=zpdf/P.np.double(sum(zpdf))
zk= P.np.cumsum(zpdf) #G(z), CDF
#create the image
p_prev=0
z0=P.np.empty(dS)
z0[:]=P.np.nan
for q in xrange(0,bins):
for p in xrange(p_prev,bins):
if zk[p] >= sk[q]:
#print ['replacing from ', sbins[q], ' to ', sbins[q+1] , ' with ', zbins[p]]
p_prev=p+1
q_last=q
#z0[ P.np.ma.mask_or(inputArr>sbins[q], inputArr<sbins[q+1]) ] = zbins[p];
if q==0:
z0[ inputArr<sbins[q+1] ] = zbins[p];
else:
z0[ ((inputArr>=sbins[q]).astype(P.np.int) * (inputArr<sbins[q+1]).astype(P.np.int)).astype(P.np.bool) ] = zbins[p];
#print ['replacing ', ((inputArr>sbins[q]).astype(P.np.int) * (inputArr<sbins[q+1]).astype(P.np.int)).sum(), ' pixels'];
break #inner for
#print('q %f p %f zk %f sk %f' %(q,p,zk[p], sk[q]))
z0[inputArr>=sbins[q_last]]=zbins[p]
return z0
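# Usage sketch (illustrative, not part of the original module): match the
# grey-value distribution of one image to another. Both arrays below are
# hypothetical; NaN pixels are ignored by the function.
#
#   img = np.random.rayleigh(1.0, (256, 256))
#   ref = np.random.normal(10.0, 2.0, (256, 256))
#   matched = histogram_matching(img, histogramArr=ref, bins=100)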
def histogramMap(arrIn, **kwargs):
'''histogramMap(arr, **kwargs)
Returns an array where the values are replaced with
histogram values.
'''
arr=arrIn.copy();
pdf,bins=P.np.histogram(arr, **kwargs);
for b in xrange(len(bins)):
if b == 0:
arr[arr<bins[b]]=pdf[b];
elif b==len(bins)-1:
arr[arr>bins[b]]=pdf[b-1];
else:
arr[ (arr>bins[b])&(arr<bins[b+1]) ] = pdf[b];
return arr
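# Usage sketch (illustrative): replace each pixel by the population of its
# histogram bin, e.g. to highlight rare values. Extra keyword arguments are
# passed straight to np.histogram, so `bins` works as shown below.
#
#   counts_img = histogramMap(np.random.randn(128, 128), bins=50)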
def sensitivity_plot(lvar, lnames):
"""fg=sensitivity_plot(lvar, lnames)
Creates a plot for parameter correlation. The design of the plot is after the InSAR group at Oxford (Parsons)
"""
#Now the hard part... Plotting
fg=P.figure()
#First we need to know how many variables (nvar)
#nvel, nf, nalpha, A, B, E, and result(H)
nvar=len(lvar)
#now put all variables in a list
#lvar=[nvel,nf,nalpha,nA,nB,nE,nH]
#list of names
#lnvar=['vel','f','slope','A','B','E','H']
lnvar=lnames
#now we create subplot (nvar-1 x nvar-1)
# for k in xrange(nvar-1):
# for l in xrange(nvar-1-k):
# ax = P.subplot(nvar-1, nvar-1, l*(nvar-1)+k+1, ) # aspect='equal',autoscale_on=False, xlim=[1,3], ylim=[1,3])
# P.scatter(lvar[k], lvar[l+k+1], 5, c='k', marker='.');
# P.axis('tight')
# P.gca().xaxis.get_major_locator()._nbins=4
# P.gca().yaxis.get_major_locator()._nbins=4
# P.xlabel(lnvar[k]);
# P.ylabel(lnvar[l+k+1]);
for k in xrange(nvar-1):
for l in xrange(k+1):
plnum=k*(nvar-1)+l+1 #plotnumber
if plnum == (nvar-1)**2:
kk=nvar -2
else:
kk=plnum % (nvar-1) -1
            ll=int(np.ceil(plnum/float(nvar-1)))
ax = P.subplot(nvar-1, nvar-1, plnum, ) # aspect='equal',autoscale_on=False, xlim=[1,3], ylim=[1,3])
P.scatter(lvar[kk], lvar[ll], 5, c='k', marker='.');
P.axis('tight')
P.gca().xaxis.get_major_locator()._nbins=4
P.gca().yaxis.get_major_locator()._nbins=4
P.xlabel(lnvar[kk]);
P.ylabel(lnvar[ll]);
P.tight_layout(pad=0.005, w_pad=0.005, h_pad=0.005)
return fg
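# Usage sketch (illustrative): lvar is a list of equally long 1-D samples (one
# per parameter) and lnames their labels; every pair of parameters gets a
# scatter panel. The arrays below are hypothetical Monte-Carlo draws.
#
#   vel, slope, H = np.random.randn(3, 500)
#   fg = sensitivity_plot([vel, slope, H], ['vel', 'slope', 'H'])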
def manual_translation(master, slave):
def onrelease(event):
global coord
try:
x=np.round(event.xdata);
except:
return
y=np.round(event.ydata);
imshow_box(fig,master,x,y, s);
coord=(x,y);
def onkeypress(event):
#print('you pressed', event.key, event.xdata, event.ydata)
global coord
x,y=coord
if event.key == "up":
y=y-1;
elif event.key == "down":
y=y+1;
elif event.key =="left":
x=x-1
elif event.key == "right":
x=x+1
coord=(x,y);
imshow_box(fig,master,x,y, s);
def imshow_box(f,im, x,y,s):
'''imshow_box(f,im, x,y,s)
f: figure
im: image
x: center coordinate for box
y: center coord
s: box shape, (width, height)
'''
global coord
P.figure(f.number)
P.clf();
P.imshow(im);
P.axhline(y-s[1]/2.)
P.axhline(y+s[1]/2.)
P.axvline(x-s[0]/2.)
P.axvline(x+s[0]/2.)
xy=crop(m,s,y,x)
coord=(0.5*(xy[2]+xy[3]), 0.5*(xy[0]+xy[1]))
P.title(str('x: %d y: %d' % (x,y)));
P.figure(999);
P.imshow(master[xy[0]:xy[1],xy[2]:xy[3]])
P.title('Master');
P.figure(998);
df=(master[xy[0]:xy[1],xy[2]:xy[3]]-slave)
P.imshow(np.abs(df))
P.title(str('RMS: %0.6f' % np.sqrt((df**2.).mean()) ));
def crop(m,s,x,y):
        xy=[int(round(k)) for k in [x-s[0]/2. , x+s[0]/2. , y-s[1]/2., y+s[1]/2.]]
        #print xy
        if np.any(np.asarray(xy)<0):
if xy[0]<0:
xy[1]=xy[1]-xy[0];xy[0]=0;
if xy[2]<0:
xy[3]=xy[3]-xy[2];xy[2]=0;
if xy[1]>m[0]:
xy[0]=xy[0]-(xy[1]-m[0]);xy[1]=m[0];
if xy[3]>m[1]:
xy[2]=xy[2]-(xy[3]-m[1]);xy[3]=m[1];
return xy
s=slave.shape
m=master.shape
fig=P.figure();
imshow_box(fig,master,m[0]*0.5,m[1]*0.5,s);
coord=(m[0]*0.5,m[1]*0.5)
P.figure();
P.imshow(slave);P.title('Slave');
fig.canvas.mpl_connect('button_release_event', onrelease);
fig.canvas.mpl_connect('key_press_event', onkeypress);
return fig
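# Usage sketch (illustrative): interactively find the integer offset of a small
# patch inside a larger image. Clicking recentres the box, the arrow keys nudge
# it, and the RMS of the master/slave difference is reported in a side figure.
# The arrays below are hypothetical.
#
#   big = np.random.rand(500, 500)
#   small = big[200:264, 150:214]          # a patch cut from the master
#   fig = manual_translation(big, small)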
def manual_translation_scatter(master, sx,sy,sz, dotsize=1):
def onrelease(event):
if event.button !=3:
return
global coord
try:
x=np.round(event.xdata);
except:
return
y=np.round(event.ydata);
imshow_box(fig,master,x,y, s);
coord=(x,y);
def onkeypress(event):
#print('you pressed', event.key, event.xdata, event.ydata)
global coord
x,y=coord
if event.key == "up":
y=y-1;
elif event.key == "down":
y=y+1;
elif event.key =="left":
x=x-1
elif event.key == "right":
x=x+1
coord=(x,y);
imshow_box(fig,master,x,y, s);
def imshow_box(f,im, x,y,s):
'''imshow_box(f,im, x,y,s)
f: figure
im: image
x: center coordinate for box
y: center coord
s: box shape, (width, height)
'''
global coord
P.figure(f.number)
P.clf();
P.imshow(im);
P.axhline(y)
P.axhline(y+s[1])
P.axvline(x)
P.axvline(x+s[0])
P.scatter(sx+x,sy+y, dotsize, sz, edgecolor='none');
coord=(x,y);
P.title(str('x: %d y: %d' % (x,y)));
s=(sx.max()-sx.min() , sy.max()-sy.min() )
m=master.shape
fig=P.figure();
imshow_box(fig,master,m[0]*0.5,m[1]*0.5,s);
coord=(m[0]*0.5,m[1]*0.5)
fig.canvas.mpl_connect('button_release_event', onrelease);
fig.canvas.mpl_connect('key_press_event', onkeypress);
return fig
def z2rgb(z):
    '''Map a complex array to RGB (domain colouring: magnitude and phase to colour) and display it; returns the figure handle.'''
    z=abs(np.log10(z))*np.exp(1j*np.angle(z));
    r = abs(z);
    [d1,d2]=z.shape;
    a = np.sqrt(1./6)*np.real(z);
    b = np.sqrt(1./2)*np.imag(z);
    d = 1./(1+r**2);
    R = 1./2 + np.sqrt(2./3)*np.real(z)*d;
    G = 1./2 - d*(a-b);
    B = 1./2 - d*(a+b);
    d = 1./2 - r*d;
    d[r<1] = -d[r<1];
    C=np.zeros([d1,d2,3]);
    C[:,:,0] = R + d;
    C[:,:,1] = G + d;
    C[:,:,2] = B + d;
    f=P.figure();
    P.imshow(C);
    return f
def z2rgba(z):
    '''Currently identical to z2rgb: map a complex array to RGB and display it; returns the figure handle.'''
    z=abs(np.log10(z))*np.exp(1j*np.angle(z));
    r = abs(z);
    [d1,d2]=z.shape;
    a = np.sqrt(1./6)*np.real(z);
    b = np.sqrt(1./2)*np.imag(z);
    d = 1./(1+r**2);
    R = 1./2 + np.sqrt(2./3)*np.real(z)*d;
    G = 1./2 - d*(a-b);
    B = 1./2 - d*(a+b);
    d = 1./2 - r*d;
    d[r<1] = -d[r<1];
    C=np.zeros([d1,d2,3]);
    C[:,:,0] = R + d;
    C[:,:,1] = G + d;
    C[:,:,2] = B + d;
    f=P.figure();
    P.imshow(C);
    return f
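# Usage sketch (illustrative): domain-colour a complex array, e.g. a filtered
# interferogram, mapping magnitude and phase to colour. The input is hypothetical.
#
#   cpx = (1.0 + np.random.rand(128, 128)) * np.exp(1j * np.random.randn(128, 128))
#   fig = z2rgb(cpx)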
def distance_from_colorbar(im, c=P.cm.jet(np.arange(256))):
""" This function calculates a distance value for all pixels of an image given a colorbar.
It also returns the best fitting colorbar class for each pixel.
e.g.:
import cv2
im=cv2.cvtColor(cv2.imread('REsults_zonaSur_cut.tiff'), cv2.COLOR_BGR2RGB);
dist, classes=distance_from_colorbar(im, c=P.cm.jet(np.arange(256)));
dist, classes=distance_from_colorbar(im, c=P.cm.gray(linspace(0,1,8)));
"""
import time
N=c.shape[0];
cmax=2**np.round(np.log2(im.max()))
if c.max() != cmax:
c256=c*cmax/c.max(); #colorbar in uint8
else:
c256=c;
dist=np.ones((im.shape[0], im.shape[1]))*np.inf;
distOld=dist.copy();
classes=np.zeros((im.shape[0], im.shape[1]));
ccc=0; #current class counter
t0=0; #time.time(); #show 0.0 at the beginning of the loop.
for k in c256:
dist =np.dstack([dist, np.sqrt( (im[:,:,0]-k[0])**2. + (im[:,:,1]-k[1])**2. + (im[:,:,2]-k[2])**2. )]).min(2);
classes[dist!=distOld]=ccc;
ccc=ccc+1;distOld=dist;
if basic.progresstime(t0):
basic.progress(ccc,N);t0=time.time();
return dist, classes;
def data_from_image(im, classes, mask=None, limits=[0.,1.]):
""" This function extracts the data from a image, based on the classes and a mask.
Once the data is masked, a look-up-table is used to set values to the classes.
"""
import time;
#set_trace()
N=int (basic.nonan(classes).max()+1) ; # zero is a class...
if mask is not None:
classes[mask]=np.nan
classLimits=np.linspace(np.min(limits), np.max(limits), N);
cmax=2**np.round(np.log2(im.max()));
imSingle=im[:,:,0]*(cmax**0)+im[:,:,1]*(cmax**1)+im[:,:,2]*(cmax**2.);
data=np.zeros(imSingle.shape);
t0=0;
for k in range(N-1):
#for each class do a linear interpolation
if np.any(classes==k):
if np.std(imSingle[classes==k]) == 0:
data[classes==k]=classLimits[k:k+2].mean()
else:
data[classes==k]=basic.rescale(imSingle[classes==k], classLimits[k:k+2], quiet=True);
if basic.progresstime(t0):
basic.progress(k,N); t0=time.time();
if mask is not None:
data[mask]=np.nan;
data[data>classLimits[-1]]=classLimits[-1]
data[data<classLimits[0]] =classLimits[0]
return data;
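# Usage sketch (illustrative): recover numeric values from a screenshot of a
# colour-mapped figure by first classifying each pixel against a colourbar and
# then mapping the classes back onto the colourbar limits. The file name,
# mask threshold and limits below are hypothetical.
#
#   im = P.imread('colormapped_figure.png')[:, :, :3]
#   dist, classes = distance_from_colorbar(im, c=P.cm.jet(np.arange(256)))
#   values = data_from_image(im, classes, mask=(dist > 0.2), limits=[0., 5.])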
def class2data(classes, c=P.cm.jet(np.arange(256)), limits=[0., 1.]):
""" Find the closest value in the colormap.
"""
N=c.shape[0];
data=classes/N*[np.max(limits)-np.min(limits)]+np.min(limits)
return data;
#http://stackoverflow.com/questions/1679126/how-to-plot-an-image-with-non-linear-y-axis-with-matplotlib-using-imshow/6788842#6788842
def scalogram(data, vmin=None, vmax=None, linear=False):
""" scalogram(data, vmin=None, vmax=None)
Plots a scalogram for wavelet transforms generated by
pywt.wavedec using imshow.
"""
bottom = 0
if not vmin:
vmin = min(map(lambda x: min(abs(x)), data))
if not vmax:
vmax = max(map(lambda x: max(abs(x)), data))
if linear:
scale=1./len(data);
P.gca().set_autoscale_on(False)
for row in range(0, len(data)):
if not linear:
scale = 2.0 ** (row - len(data))
P.imshow(
np.array([abs(data[row])]),
interpolation = 'nearest',
vmin = vmin,
vmax = vmax,
extent = [0, 1, bottom, bottom + scale])
bottom += scale
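# Usage sketch (illustrative): display the coefficients of a 1-D multilevel
# wavelet decomposition. Assumes the optional PyWavelets package is installed.
#
#   import pywt
#   sig = np.cumsum(np.random.randn(1024))
#   coeffs = pywt.wavedec(sig, 'db4', level=6)
#   P.figure(); scalogram(coeffs); P.show()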
def scalogram2(data, vmin=None, vmax=None, max_level=None, scale=None, percent=False, verbose=False):
""" scalogram2(data, vmin=None, vmax=None)
Plots a 2D scalogram for wavelet transforms generated by
pywt.wavedec2 using imshow.
"""
if percent==True:
maxData=[]
sumData=[]
sumData.append(abs(data[0]).sum());
#sumData=0;
maxData.append(abs(data[0]).max())
for row in range(1,len(data)):
sumData.append(abs(data[row][0]).sum()); sumData.append(abs(data[row][1]).sum()); sumData.append(abs(data[row][2]).sum());
maxData.append(abs(data[row][0]).max()); maxData.append(abs(data[row][1]).max()); maxData.append(abs(data[row][2]).max());
sumData=np.array(sumData)
maxData=np.array(maxData)
multiplier=1./maxData;
if verbose:
print('sumData');print(sumData);
print('maxData');print(maxData);
print('multiplier'); print(multiplier);
else:
multiplier=np.ones([1,len(data)*3+1]);
if not max_level:
max_level=len(data);
if scale=='power' or scale=='logpower':
ppower=(len(data)+1)**2*np.ones(data[0].shape);
approx=abs(data[0])*multiplier[0];
#P.matshow(approx);P.title('Preloop');
for row in range(1,len(data)):
#scale = 2.0 ** (row - len(data))
#print row
#print approx.shape
#print data[row][0].shape
if approx.shape[0] != data[row][0].shape[0]:
approx=basic.resize(approx, data[row][0].shape);
if scale=='power' or scale=='logpower':
ppower=basic.resize(ppower, data[row][0].shape);
#print basic.resize
arr=np.zeros([ data[row][0].shape[0]*2, data[row][0].shape[1]*2] )
#print arr.shape
#print approx.shape
#P.matshow(approx);P.title(['Row', str(row)]);
if verbose:
print('0 Min: %E Max: %E' % (abs(data[row][0]).min(), abs(data[row][0]).max()))
print('1 Min: %E Max: %E' % (abs(data[row][1]).min(), abs(data[row][1]).max()))
print('2 Min: %E Max: %E' % (abs(data[row][2]).min(), abs(data[row][2]).max()))
arr[0:approx.shape[0], 0:approx.shape[1]]=approx;
arr[0:approx.shape[0], approx.shape[1]: ]=abs(data[row][0])*multiplier[row*3-2];
arr[approx.shape[0]: , 0:approx.shape[1]]=abs(data[row][1])*multiplier[row*3-1];
arr[approx.shape[0]: , approx.shape[1]: ]=abs(data[row][2])*multiplier[row*3];
approx=arr; #save for next loop.
if scale=='power' or scale=='logpower':
power=(len(data)+1-row)**2*np.ones([ data[row][0].shape[0]*2, data[row][0].shape[1]*2] )
power[0:ppower.shape[0], 0:ppower.shape[1]]=ppower;
ppower=power;
if row==max_level:
break
if not vmin:
vmin = min(arr.ravel())
if not vmax:
vmax = max(arr.ravel())
P.figure();
P.gca().set_autoscale_on(False)
if scale=='log':
P.imshow(
10*np.log10(arr),
interpolation = 'nearest',
vmin = 10*np.log10(vmin),
vmax = 10*np.log10(vmax),
extent = [0, 1, 0, 1])
P.colorbar();
P.title('Logscale')
elif scale=='sqrt':
P.imshow(
np.sqrt(arr),
interpolation = 'nearest',
vmin = np.sqrt(vmin),
vmax = np.sqrt(vmax),
extent = [0, 1, 0, 1])
P.colorbar();
P.title('SQRT')
elif scale=='power':
vmin = min((arr/power).ravel())
vmax = max((arr/power).ravel())
P.imshow(
arr/power,
interpolation = 'nearest',
vmin = vmin,
vmax = vmax,
extent = [0, 1, 0, 1])
P.colorbar();
P.title('Power')
elif scale=='logpower':
vmin = min((10*np.log10(arr/power)).ravel())
vmax = max((10*np.log10(arr/power)).ravel())
P.imshow(
10*np.log10(arr/power),
interpolation = 'nearest',
vmin = vmin,
vmax = vmax,
extent = [0, 1, 0, 1])
P.colorbar();
P.title('LogPower')
else:
P.imshow(
arr,
interpolation = 'nearest',
vmin = vmin,
vmax = vmax,
extent = [0, 1, 0, 1])
P.colorbar();
P.title('Linear scale')
if verbose:
print('vmin: %E, vmax: %E ' %(vmin, vmax))
#Draw Dividers
for r in xrange(1,max_level):
s = 2.0 ** (r - max_level); #s=scale, but scale is used above and below
lim=2.*s;
#print scale
P.plot([s, s],[1-lim, 1], 'r', linewidth=4)
P.plot([0, lim],[1-s, 1-s], 'r', linewidth=4)
if scale=='power' or scale=='logpower':
P.matshow(power);P.colorbar();P.title('Denominator');
return arr
def mapshow(lon,lat, z):
    from mpl_toolkits.basemap import Basemap
    from scipy import interpolate
    if lat.ndim==1:
        d0,d1=np.meshgrid(np.linspace(lat.min(),lat.max(), lon.shape[1]),
          np.linspace(lon.min(), lon.max(), lon.shape[0]))
        z = interpolate.griddata((lon.ravel(),lat.ravel()), z.ravel(), (d1, d0), method='linear')
    else:
        d0=lon;d1=lat;
    if np.asarray(lat).ravel()[0] > 0: #northern hemisphere
        if d1[0,0]<d1[-1,0]:
            d1=np.flipud(d1)
    print ('Please wait... Generating map\n')
    m = Basemap(llcrnrlon=d0.min(), llcrnrlat=d1.min(), urcrnrlon=d0.max(), urcrnrlat=d1.max(),
            resolution='f', area_thresh=1., projection='cyl')
    m.imshow(z, interpolation='nearest', origin='upper')
    m.drawcoastlines(color='w',linewidth=0.8)
    m.drawmapboundary() # draw a line around the map region
    m.drawrivers()
    m.drawparallels(np.arange(int(d1.min()), int(d1.max()), 1),linewidth=0.2,labels=[1,0,0,0])
    m.drawmeridians(np.arange(int(d0.min()), int(d0.max()), 1),linewidth=0.2,labels=[0,0,0,1])
| gpl-2.0 |
eggplantbren/StatisticalCompass | transform.py | 2 | 1105 | import numpy as np
import pandas as pd
def transform(pos, questions):
"""
Takes the user's position and scales it, with some consideration to
the *available* distance in each direction. The method is ad-hoc and may
remain so.
"""
# # Old method
# # Mean and sd of question vectors
# mu = np.mean(questions.iloc[:,1:].values, axis=0)
# sd = np.std(questions.iloc[:,1:].values, axis=0)
# pos = ((pos - mu)/sd)/np.sqrt(questions.shape[0])
# pos = np.sign(pos)*np.sqrt(np.abs(pos)) # Take square root for moderation
# return [pos, mu, sd]
# New method
most_extreme = 2.*np.sum(np.abs(questions.iloc[:,1:].values), axis=0)
# pos is in [-most_extreme, most_extreme]
result = pos/most_extreme # \in [-1, 1]
#result = result**3 # Nonlinear transformation if desired (must be odd)
result = 5.*result # Transform to [-5, 5]
return [result, most_extreme]
if __name__ == '__main__':
"""
Load the questions and print the mean and sd of the question vectors
"""
questions = pd.read_csv('questions.csv')
[pos, most_extreme] = transform(np.array([14.2, -3.1, -1.4]), questions)
print(pos)
| mit |
dssg/wikienergy | disaggregator/build/pandas/pandas/compat/pickle_compat.py | 15 | 2829 | """ support pre 0.12 series pickle compatibility """
import sys
import numpy as np
import pandas
import copy
import pickle as pkl
from pandas import compat, Index
from pandas.compat import u, string_types
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if type(args[0]) is type:
n = args[0].__name__
try:
stack[-1] = func(*args)
return
except Exception as e:
# if we have a deprecated function
# try to replace and try again
if '_reconstruct: First argument must be a sub-type of ndarray' in str(e):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except:
pass
# try to reencode the arguments
if getattr(self,'encoding',None) is not None:
args = tuple([arg.encode(self.encoding)
if isinstance(arg, string_types)
else arg for arg in args])
try:
stack[-1] = func(*args)
return
except:
pass
if getattr(self,'is_verbose',None):
print(sys.exc_info())
print(func, args)
raise
if compat.PY3:
class Unpickler(pkl._Unpickler):
pass
else:
class Unpickler(pkl.Unpickler):
pass
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
# py3 compat
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except:
pass
def load(fh, encoding=None, compat=False, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh: a filelike object
encoding: an optional encoding
compat: provide Series compatibility mode, boolean, default False
is_verbose: show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except:
raise
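# Usage sketch (illustrative, not part of pandas): `load` is normally driven by
# pandas' read_pickle compatibility path, but it can also be called directly
# with an open binary file handle. The file name below is hypothetical.
#
#   with open('legacy_series.pickle', 'rb') as fh:
#       obj = load(fh, encoding='latin1')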
| mit |
esiivola/GPYgradients | GPy/kern/src/static.py | 2 | 10668 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from paramz.transformations import Logexp
from paramz.caching import Cache_this
class Static(Kern):
def __init__(self, input_dim, variance, active_dims, name):
super(Static, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', variance, Logexp())
self.link_parameters(self.variance)
def _save_to_input_dict(self):
input_dict = super(Static, self)._save_to_input_dict()
input_dict["variance"] = self.variance.values.tolist()
return input_dict
def Kdiag(self, X):
ret = np.empty((X.shape[0],), dtype=np.float64)
ret[:] = self.variance
return ret
def gradients_X(self, dL_dK, X, X2=None):
return np.zeros(X.shape)
def gradients_X_diag(self, dL_dKdiag, X):
return np.zeros(X.shape)
def gradients_XX(self, dL_dK, X, X2=None):
if X2 is None:
X2 = X
return np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)
def gradients_XX_diag(self, dL_dKdiag, X, cov=False):
return np.zeros((X.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return np.zeros(Z.shape)
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape)
def psi0(self, Z, variational_posterior):
return self.Kdiag(variational_posterior.mean)
def psi1(self, Z, variational_posterior):
return self.K(variational_posterior.mean, Z)
def psi2(self, Z, variational_posterior):
K = self.K(variational_posterior.mean, Z)
        return np.einsum('ij,ik->jk',K,K) #K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inheriting classes
def input_sensitivity(self, summarize=True):
if summarize:
return super(Static, self).input_sensitivity(summarize=summarize)
else:
return np.ones(self.input_dim) * self.variance
class White(Static):
def __init__(self, input_dim, variance=1., active_dims=None, name='white'):
super(White, self).__init__(input_dim, variance, active_dims, name)
def to_dict(self):
input_dict = super(White, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.White"
return input_dict
def K(self, X, X2=None):
if X2 is None:
return np.eye(X.shape[0])*self.variance
else:
return np.zeros((X.shape[0], X2.shape[0]))
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.trace(dL_dK)
else:
self.variance.gradient = 0.
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag.sum()
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum()
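# Usage sketch (illustrative, not part of GPy's shipped examples): a White kernel
# is typically added to another kernel to model i.i.d. observation noise.
#
#   import GPy
#   k = GPy.kern.RBF(input_dim=1) + GPy.kern.White(input_dim=1, variance=0.01)
#   K = k.K(np.random.rand(20, 1))   # 20x20 covariance with 0.01 added on the diagonal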
class WhiteHeteroscedastic(Static):
def __init__(self, input_dim, num_data, variance=1., active_dims=None, name='white_hetero'):
"""
A heteroscedastic White kernel (nugget/noise).
It defines one variance (nugget) per input sample.
Prediction excludes any noise learnt by this Kernel, so be careful using this kernel.
You can plot the errors learnt by this kernel by something similar as:
plt.errorbar(m.X, m.Y, yerr=2*np.sqrt(m.kern.white.variance))
"""
super(Static, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', np.ones(num_data) * variance, Logexp())
self.link_parameters(self.variance)
def to_dict(self):
input_dict = super(WhiteHeteroscedastic, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.WhiteHeteroscedastic"
return input_dict
def Kdiag(self, X):
if X.shape[0] == self.variance.shape[0]:
# If the input has the same number of samples as
# the number of variances, we return the variances
return self.variance
return 0.
def K(self, X, X2=None):
if X2 is None and X.shape[0] == self.variance.shape[0]:
return np.eye(X.shape[0]) * self.variance
else:
return 0.
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.diagonal(dL_dK)
else:
self.variance.gradient = 0.
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0
class Bias(Static):
def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):
super(Bias, self).__init__(input_dim, variance, active_dims, name)
def to_dict(self):
input_dict = super(Bias, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.Bias"
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Bias(**input_dict)
def K(self, X, X2=None):
shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])
return np.full(shape, self.variance, dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
self.variance.gradient = dL_dK.sum()
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag.sum()
def psi2(self, Z, variational_posterior):
return np.full((Z.shape[0], Z.shape[0]), self.variance*self.variance*variational_posterior.shape[0], dtype=np.float64)
def psi2n(self, Z, variational_posterior):
ret = np.empty((variational_posterior.mean.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
ret[:] = self.variance*self.variance
return ret
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
if dL_dpsi2.ndim == 2:
self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
+ 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0])
else:
self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
+ 2.*self.variance*dL_dpsi2.sum())
class Fixed(Static):
def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
"""
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
super(Fixed, self).__init__(input_dim, variance, active_dims, name)
self.fixed_K = covariance_matrix
    def K(self, X, X2=None):
if X2 is None:
return self.variance * self.fixed_K
else:
return np.zeros((X.shape[0], X2.shape[0]))
def Kdiag(self, X):
return self.variance * self.fixed_K.diagonal()
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K)
else:
self.variance.gradient = 0
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = np.einsum('i,i', dL_dKdiag, np.diagonal(self.fixed_K))
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum()
class Precomputed(Fixed):
def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='precomputed'):
"""
Class for precomputed kernels, indexed by columns in X
Usage example:
import numpy as np
from GPy.models import GPClassification
from GPy.kern import Precomputed
from sklearn.cross_validation import LeaveOneOut
n = 10
d = 100
X = np.arange(n).reshape((n,1)) # column vector of indices
y = 2*np.random.binomial(1,0.5,(n,1))-1
X0 = np.random.randn(n,d)
k = np.dot(X0,X0.T)
kern = Precomputed(1,k) # k is a n x n covariance matrix
cv = LeaveOneOut(n)
ypred = y.copy()
for train, test in cv:
m = GPClassification(X[train], y[train], kernel=kern)
m.optimize()
ypred[test] = 2*(m.predict(X[test])[0]>0.5)-1
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
assert input_dim==1, "Precomputed only implemented in one dimension. Use multiple Precomputed kernels to have more dimensions by making use of active_dims"
super(Precomputed, self).__init__(input_dim, covariance_matrix, variance, active_dims, name)
@Cache_this(limit=2)
def _index(self, X, X2):
if X2 is None:
i1 = i2 = X.astype('int').flat
else:
i1, i2 = X.astype('int').flat, X2.astype('int').flat
return self.fixed_K[i1,:][:,i2]
def K(self, X, X2=None):
return self.variance * self._index(X, X2)
def Kdiag(self, X):
return self.variance * self._index(X,None).diagonal()
def update_gradients_full(self, dL_dK, X, X2=None):
self.variance.gradient = np.einsum('ij,ij', dL_dK, self._index(X, X2))
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = np.einsum('i,ii', dL_dKdiag, self._index(X, None))
| bsd-3-clause |
guihh/CmpCalib_matplotlib | CmpCalib_matplotlib.py | 1 | 11089 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# sudo apt-get install python3-tk
# sudo pip3 install numpy / scipy / matplotlib
#
import os
import sys
import argparse
import numpy as np
import re
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
from scipy.interpolate import griddata
from collections import OrderedDict
class Calibration:
def __init__(self):
self.path = ''
self.e_radiaux=[]
self.e_plani=[]
self.e_plani_max=0
self.random_color = np.asarray( np.random.rand(3) )
return
    def load(self):
        with open(self.path, 'r') as calib_file:
            for row in calib_file:
                if re.match(r'\s\d', row):
                    # list(...) so the parsed rows can later be turned into numpy arrays
                    self.e_radiaux.append(list(map(float, row.split())))
                elif re.match(r'\d', row):
                    self.e_plani.append(list(map(float, row.split())))
        return
def set_path(self,path):
self.path=path
return
def set_e_plani_max(self):
self.e_plani_max = np.max(np.asarray(self.e_plani)[:,4])
return
class Comparaison:
def __init__(self):
self.path_calibs = []
self.calibrations = OrderedDict()
self.max_all_e_plani=[]
self.max_scale= 1.0
self.is_max_scale_defined=False
self.linewidth = 1.75
self.fontsize = 17
self.nbclass=50
self.scale_output=1.0
self.plot = plt.figure(1)
self.plot.set_size_inches(20,15)
self.ratio = 0.2
self.dir_output=''
self.interpolation = 'linear'
self.width_arrows = 0.003
return
#Instances all Calibs
def load(self):
for path_calib in self.path_calibs:
self.calibrations[path_calib] = Calibration() #Instance
self.calibrations[path_calib].set_path(path_calib) # set path
self.calibrations[path_calib].load() # parse txt file
self.calibrations[path_calib].set_e_plani_max() # store the max euclidian dist
self.max_all_e_plani.append(self.calibrations[path_calib].e_plani_max) # store in Comparaison object the maximum objet between all file to plot to share same scale
return
def get_args(self):
try:
parser = argparse.ArgumentParser()
except:
parser.print_help()
sys.exit(0)
parser.add_argument( 'Calibrations' , help = 'Absolute path of calibration file(s).' , type = str , nargs = '+' )
parser.add_argument( '-o' , '--output' , help = 'Absolute path to save' , type = str , action = "store", dest = 'output')
parser.add_argument( '-m' , '--max_scale' , help = 'Maximum deviation value to plot. Default is maximum deviation from input file(s).' , type = float , action = "store", dest = 'max_scale')
parser.add_argument( '-c' , '--nbclass' , help = 'Number of classes for the LUT. Default is 50.' , type = int , action = "store", dest = 'nbclass')
parser.add_argument( '-f' , '--fontsize' , help = 'Fontsize. Default is 17.' , type = int , action = "store", dest = 'fontsize')
parser.add_argument( '-l' , '--linewidth' , help = 'Width of the line to plot. Default is 1.75.' , type = float , action = "store", dest = 'linewidth')
parser.add_argument( '-r' , '--ratio' , help = 'Padding in each "Ecarts Planimetriques" frame (%% of maximum value X & Y). Useful to displayed whole plotted arrows. Default is 0.2.' , type = float , action = "store", dest = 'ratio')
parser.add_argument( '-sc' , '--scale_output' , help = 'coefficient to choose scale of image output.' , type = float , action = "store", dest = 'scale_output')
parser.add_argument( '-i' , '--interpolation_mode' , help = 'Choose between: {‘linear’, ‘nearest’, ‘cubic’}. Default is ''linear'' ', type = str , action = "store", dest = 'interpolation_mode')
        parser.add_argument( '-w' , '--width_arrows' , help = 'Width of the arrows. Default is 0.003', type = float , action = "store", dest = 'width_arrows')
args = parser.parse_args()
self.path_calibs = args.Calibrations
self.dir_output = os.path.join(os.path.dirname(self.path_calibs[0]),'CmpCalib_plot.png')
if args.ratio is not None:
self.ratio = args.ratio
if args.output is not None:
self.dir_output = args.output
if args.max_scale is not None:
self.is_max_scale_defined=True
self.max_scale = args.max_scale
if args.nbclass is not None:
self.nbclass = args.nbclass
if args.fontsize is not None:
self.fontsize = args.fontsize
if args.linewidth is not None:
self.linewidth = args.linewidth
if args.scale_output is not None:
self.scale_output = args.scale_output
if args.interpolation_mode is not None:
self.interpolation = args.interpolation_mode
if args.width_arrows is not None:
self.width_arrows = args.width_arrows
return
def initialize_plot(self):
plt.clf() # clear
plt.ion()
#~ plt.show() #keep open
return
def plot_e_radiaux_plani(self):
gs = gridspec.GridSpec(7, 7)
for i,calibration in enumerate(self.calibrations.keys()):
#### plot ecart radiaux ####
#~ ax = plt.subplot2grid((4, 4), (0,0),colspan=4,rowspan=1)
ax = plt.subplot(gs[0:2, 0:7])
ax.plot(np.asarray(self.calibrations[calibration].e_radiaux)[:,0],np.asarray(self.calibrations[calibration].e_radiaux)[:,1],\
c=self.calibrations[calibration].random_color,label=os.path.basename(calibration),linewidth = self.linewidth) # plot ecart radiaux data
ax.tick_params( axis = 'both' , labelsize = self.fontsize-3)
ax.set_xlabel('Rayon (px)', horizontalalignment = 'center').set_fontsize(self.fontsize)
ax.set_ylabel( 'Ecarts radiaux (px)' ).set_fontsize(self.fontsize)
ax.legend(loc = 'upper left', prop = {'size': self.fontsize} )
ax.spines['top'].set_visible(False)
#~ ax.yaxis.set_ticks_position('left','right')
ax.xaxis.set_ticks_position('bottom')
#### plot ecart plani ####
if len(self.calibrations)==1:
ax1 = plt.subplot(gs[3:7, 1:6])
ax1.set_title(os.path.basename(calibration),fontsize=self.fontsize+4,fontweight='bold',color=self.calibrations[calibration].random_color,position=(0.5, 1-(self.ratio/5))) #if 1 file
elif len(self.calibrations)==2:
ax1 = self.plot.add_subplot(2,2,(i+1+2))
ax1.set_title(os.path.basename(calibration),fontsize=self.fontsize,fontweight='bold',color=self.calibrations[calibration].random_color,position=(0.5, 0.95-(self.ratio/2.8))) #if 2 files
else:
ax1 = self.plot.add_subplot(3,3,(i+len(self.calibrations)))
ax1.set_title(os.path.basename(calibration),fontsize=self.fontsize,color=self.calibrations[calibration].random_color,position=(0.5, 0.95-(self.ratio/3))) #if n files
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.set_xticks([min(np.asarray(self.calibrations[calibration].e_plani)[:,0]),max(np.asarray(self.calibrations[calibration].e_plani)[:,0])/2,max(np.asarray(self.calibrations[calibration].e_plani)[:,0])])
ax1.set_yticks([min(np.asarray(self.calibrations[calibration].e_plani)[:,1]),max(np.asarray(self.calibrations[calibration].e_plani)[:,1])/2,max(np.asarray(self.calibrations[calibration].e_plani)[:,1])])
if i==0:
ax1.set_ylabel('Ecarts Planimetriques (px)').set_fontsize(self.fontsize)
ax1.tick_params( axis = 'both' , labelsize = self.fontsize-3)
ax1.set_xlabel('Rayon (px)', horizontalalignment = 'center').set_fontsize(self.fontsize)
xi = np.linspace(np.min(np.asarray(self.calibrations[calibration].e_plani)[:,0])-np.max(np.asarray(self.calibrations[calibration].e_plani)[:,0])*(self.ratio), np.max(np.asarray(self.calibrations[calibration].e_plani)[:,0])*(1+self.ratio),400)
yi = np.linspace(np.min(np.asarray(self.calibrations[calibration].e_plani)[:,1])-np.max(np.asarray(self.calibrations[calibration].e_plani)[:,1])*(self.ratio), np.max(np.asarray(self.calibrations[calibration].e_plani)[:,1])*(1+self.ratio),400)
points = np.vstack((np.asarray(self.calibrations[calibration].e_plani)[:,0],np.asarray(self.calibrations[calibration].e_plani)[:,1])).T
values = np.asarray(self.calibrations[calibration].e_plani)[:,4]
zi = griddata(points,values,(xi[None,:], yi[:,None]), method=self.interpolation)
m = cm.ScalarMappable(cmap=cm.viridis_r)
if not self.is_max_scale_defined:
v = np.linspace(0, np.max(self.max_all_e_plani),self.nbclass,endpoint=True)
else:
v = np.linspace(0, self.max_scale,self.nbclass,endpoint=True)
m.set_array(v)
CS = plt.contourf(xi,yi,zi,v,cmap=m.get_cmap())
ax1.quiver(np.asarray(self.calibrations[calibration].e_plani)[:,0],np.asarray(self.calibrations[calibration].e_plani)[:,1],np.asarray(self.calibrations[calibration].e_plani)[:,2],np.asarray(self.calibrations[calibration].e_plani)[:,3],width=self.width_arrows)
plt.axis('equal')
plt.draw()
if i == (len(self.calibrations)-1):
cbar_ax = self.plot.add_axes([0.95, 0.15, 0.01, 0.7])
self.plot.colorbar(m,cax=cbar_ax)
plt.subplots_adjust(wspace=0.1, hspace=0)
#~ self.plot.set_size_inches( int(round(20*self.scale_output)) , int(round(15*self.scale_output)) )
self.plot.savefig(self.dir_output,dpi=90)
if __name__ == '__main__':
comparaison=Comparaison()
comparaison.get_args()
print('--- parsing file(s) ---')
comparaison.load()
comparaison.initialize_plot()
print('--- plotting ---')
comparaison.plot_e_radiaux_plani()
print('--- Saved to : '+ comparaison.dir_output )
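# Example invocation (a sketch; paths and option values are hypothetical):
#   python CmpCalib_matplotlib.py /data/calib_A.txt /data/calib_B.txt \
#       -o /tmp/CmpCalib_plot.png -m 0.5 -c 40 -i linear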
| lgpl-3.0 |
raphaelshirley/regphot | regphot/analysis.py | 1 | 7554 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 13:42:05 2017
This script is for post processing data produced by other parts of the codebase.
It contains function definitions which may also be useful to other modules.
Eventually this should write the models to a CSV for use by XID+.
@author: rs548
"""
from astropy.io import fits
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import pandas as pd
from scipy import stats, integrate
import seaborn as sns
sns.set(color_codes=True)
from os import listdir
from os import getcwd
from os import remove
import csv
def checkcatalogue(sdssid1,cat2):
    #Check whether the provided SDSSid appears anywhere in catalogue cat2
    matchfound = False
    for source in cat2:
        if sdssid1 == source['SDSSid']:
            matchfound = True
            break
    return matchfound
def comparecatalogues(cat1,cat2):
print('There are ', len(cat1), ' objects in catalogue 1')
print('There are ', len(cat2), ' objects in catalogue 2')
nmatch=0
for source1 in cat1:
if checkcatalogue(source1['SDSSid'],cat2):
nmatch += 1
print('There are ', nmatch, ' objects from catalogue 1 in catalogue 2')
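# Usage sketch (illustrative): the catalogues are assumed to be sequences of
# records that can be indexed with ['SDSSid'], e.g. astropy table rows or dicts.
#
#   cat1 = [{'SDSSid': 1}, {'SDSSid': 2}]
#   cat2 = [{'SDSSid': 2}, {'SDSSid': 3}]
#   comparecatalogues(cat1, cat2)      # reports 1 object in common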
def printGraphs(folder):
numberOutputs = 0
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
images = fits.open(folder + filename)
fig = plt.figure()
fig.suptitle(filename)
#norm=LogNorm(),
plt.subplot(131)
plt.imshow(images[1].data, cmap='gray', interpolation='none')
plt.title('Image')
plt.subplot(132)
plt.imshow(images[2].data, cmap='gray', interpolation='none')
plt.title('Model')
plt.subplot(133)
plt.imshow(images[3].data, cmap='gray', interpolation='none')
plt.title('Residual')
plt.show()
plt.close()
images.close()
numberOutputs = numberOutputs + 1
#remove('/Users/rs548/Documents/Science/PeteHurley/SDSS/' + filename)
def print5bandGraphs(folder,band):
numberOutputs = 0
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
images = fits.open(folder + filename)
fig = plt.figure()
fig.suptitle(filename)
#norm=LogNorm(),
plt.subplot(131)
plt.imshow(images[band + 1].data, cmap='gray', interpolation='none')
plt.title('Image')
plt.subplot(132)
plt.imshow(images[band + 6].data, cmap='gray', interpolation='none')
plt.title('Model')
plt.subplot(133)
plt.imshow(images[band + 11].data, cmap='gray', interpolation='none')
plt.title('Residual')
plt.show()
plt.close()
images.close()
numberOutputs = numberOutputs + 1
#remove('/Users/rs548/Documents/Science/PeteHurley/SDSS/' + filename)
def oneModel(output):
lognorm = True
image = fits.open(output)
fig = plt.figure()
fig.suptitle(output)
#norm=LogNorm(),
plt.imshow(image[1].data, cmap='gray', interpolation='none',norm=LogNorm())
plt.title('Image')
fig = plt.figure()
plt.imshow(image[2].data, cmap='gray', interpolation='none',norm=LogNorm())
plt.title('Model')
fig = plt.figure()
plt.imshow(image[3].data, cmap='gray', interpolation='none',norm=LogNorm())
plt.title('Residual')
image.close()
def generateTables(folder,bandnames=['u','g','r','i','z']):
numberObjects = 0
writer = csv.writer(open( folder + 'out.csv', 'wb'))
paramnames = ['OBJID',
'CHISQ',
'RA',
'DEC',
'R_e']
writer.writerow(paramnames)
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
output = fits.open(folder + filename)
numBands = ((len(output) -1)/3) -1
for band in range(0,numBands):
allbandparams = []
for param in paramnames:
allbandparams += [output[band+numBands].header[param]]
writer.writerow(allbandparams)
return writer
def printAllBandGraphs(folder):
"""
Go though a folder and print all the passband images/models/residuals
for every Galfit output file. Will have to be modified for pyprofit.
"""
numberOutputs = 0
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
images = fits.open(folder + filename)
numBands = ((len(images) -1)/3) -1
print(numBands)
fig,axarr = plt.subplots(nrows=numBands, ncols=3, sharex=True,
sharey=True, figsize=(10,10))
plt.suptitle(filename)
#norm=LogNorm(),
axarr[0,0].set_title('Image')
axarr[0,1].set_title('Model')
axarr[0,2].set_title('Residual')
for band in range(0,numBands):
axarr[band,0].imshow(images[band].data,
cmap='gray', interpolation='none')
axarr[band,1].imshow(images[band + numBands].data,
cmap='gray', interpolation='none')
axarr[band,2].imshow(images[band + 2*numBands].data,
cmap='gray', interpolation='none')
plt.show()
plt.close()
images.close()
numberOutputs = numberOutputs + 1
#remove('/Users/rs548/Documents/Science/PeteHurley/SDSS/' + filename)
print('done a file')
plt.close('all')
def generateTables(folder,bandnames=['u','g','r','i','z']):
"""
A function to go through a folder of GalfitM output files (fits) and
print all the Sersic or Sersic/bulge parameters to a CSV
"""
numberObjects = 0
outfile = open( folder + 'out.csv', 'w')
writer = csv.writer(outfile)
#Define a non general set of params to pull out for a SDSS UGRIZ fit
paramnames = ['DATAIN_U',
'CHISQ',
'1_XC_U',
'1_YC_U',
'1_MAG_U', '1_MAG_G', '1_MAG_R','1_MAG_I','1_MAG_Z',
'1_RE_U',
'1_N_U',
'1_AR_U',
'1_PA_U']
writer.writerow(paramnames)
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
output = fits.open(folder + filename)
numBands = ((len(output) -1)/3) -1
#for band in range(0,numBands):
allbandparams = []
for param in paramnames:
#print(band,numBands,param)
allbandparams += [output[6].header[param]]
writer.writerow(allbandparams)
return writer
if __name__ == '__main__':
#printGraphs('/Users/rs548/Documents/Science/PeteHurley/UVG/')
#printAllBandGraphs('/Users/rs548/Documents/Science/PeteHurley/SDSS-M-BD/')
#print5bandGraphs('/Users/rs548/Documents/Science/PeteHurley/SM/',3)
#oneModel('/Users/rs548/Documents/Science/Blended/g-output.fits')
#generateTables('/Users/rs548/Documents/Science/PeteHurley/SDSS-XM/')
| mit |
kholidfu/bpstats | app.py | 2 | 2749 | import numpy as np
import matplotlib.pyplot as plt
import urllib2
import re
from bs4 import BeautifulSoup
import pandas as pd
class bpsData(object):
def __init__(self):
self.url = "http://www.bps.go.id/tab_sub/view.php?kat=1&tabel=1&daftar=1&id_subyek=40¬ab=1"
def get_html(self):
html = urllib2.urlopen(self.url).read()
return html
def get_provinces(self):
soup = BeautifulSoup(self.get_html())
provinces = [i.text for i in soup.find_all(
'td', style=re.compile(r"height:15.0pt"))[3:37]]
return provinces
def create_csv(self):
soup = BeautifulSoup(self.get_html())
rows = soup.find_all('tr', style=re.compile("userset;height:15.0pt"))
data = ''
for row in rows:
data += ';'.join(
[r.text.replace(',', '.') for r in row.find_all('td')])
data += '\n'
data_file = data.split('\n')
with open("data.csv", "wb") as f:
for d in data_file[6:39]: # the 1st 6 data are gibberish
f.write(d.replace(';', ',') + '\n')
def analyze_data(self):
"""analyze data with pandas
"""
df = pd.read_csv("data.csv", header=None)
df.columns = ['Provinsi', '2009|L', '2010|L', '2011|L', '2012|L', '2009|P', '2010|P', '2011|P', '2012|P', '2009|T', '2010|T', '2011|T', '2012|T']
df = df.set_index(['Provinsi'])
return df
def show_graph(self):
"""show the graph with matplotlib
"""
df = self.analyze_data()
df2 = pd.DataFrame(df, columns=['2012|L', '2012|P'])
df2 = df2.sort(['2012|L', '2012|P'], ascending=False)
ax = df2.plot(kind='barh',
stacked=True,
title="Persentase Penduduk menurut Provinsi dan Jenis Kelamin Tahun 2012",
legend=False,
fontsize='10')
# custom legend
patches, labels = ax.get_legend_handles_labels()
ax.legend(patches, ['Laki-laki', 'Perempuan'], loc='best')
plt.show()
def show_graph2(self):
"""lets try back-to-back histogram
"""
df = self.analyze_data() # raw data
ind = np.arange(len(df.index)) # x locations for the groups
width = 0.35 # width of the bars
df2 = pd.DataFrame(df, columns=['2012|L', '2012|P']) # dataframe
# begin plotting
fig, ax = plt.subplots()
rects1 = ax.bar(ind, df[[1]].values, width, color='r')
rects2 = ax.bar(ind+width, df[[5]].values, width, color='y')
        ax.set_xticks(ind + width)  # align the province labels with the grouped bars
        ax.set_xticklabels(df.index.tolist())
plt.show()
if __name__ == "__main__":
b = bpsData()
b.create_csv()
#b.analyze_data()
b.show_graph()
| mit |
shanaka-desoysa/tensorflow | hello_world/linear_regression_orig.py | 1 | 2200 | """
Simple linear regression example in TensorFlow
This program tries to predict the number of thefts from
the number of fires in the city of Chicago
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xlrd
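# NOTE: written against the TensorFlow 1.x graph API (placeholders, Session); TF 2.x needs tf.compat.v1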
DATA_FILE = 'data/fire_theft.xls'
LOG_FILE = 'logs/fire_theft'
LEARNING_RATE = 0.001
NUM_EPOCH = 1000
# Step 1: read in data from the .xls file
book = xlrd.open_workbook(DATA_FILE, encoding_override="utf-8")
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1
# Step 2: create placeholders for input X (number of fires) and label Y
# (number of thefts)
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
# Step 3: create weight and bias, initialized to 0
w = tf.Variable(0.0, name='weights')
b = tf.Variable(0.0, name='bias')
# Step 4: build model to predict Y
Y_predicted = X * w + b
# Step 5: use the root mean squared error as the loss function
loss = tf.sqrt(tf.reduce_mean(tf.square(Y - Y_predicted)), name='loss')
# Step 6: use gradient descent with learning rate LEARNING_RATE to minimize loss
optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)
with tf.Session() as sess:
# Step 7: initialize the necessary variables, in this case, w and b
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(LOG_FILE, sess.graph)
# Step 8: train the model
    for i in range(NUM_EPOCH):  # train the model for NUM_EPOCH epochs
total_loss = 0
for x, y in data:
# Session runs train_op and fetch values of loss
_, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
total_loss += l
print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
# close the writer when you're done using it
writer.close()
# Step 9: output the values of w and b
w_value, b_value = sess.run([w, b])
# plot the results
X, Y = data.T[0], data.T[1]
plt.plot(X, Y, 'bo', label='Real data')
plt.plot(X, X * w_value + b_value, 'r', label='Predicted data')
plt.legend()
plt.show()
'''
LEARNING_RATE = 0.001
NUM_EPOCH = 3000
Epoch 2999: 12.539423170543852
'''
| apache-2.0 |
ProfessorKazarinoff/staticsite | content/code/statistics/seaborn_histogram.py | 1 | 1204 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('data.csv', header=0)
colors = sns.color_palette("pastel")
# Four subplots, unpack the axes array immediately
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(9, 3), sharey=True)
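# sns.distplot draws a histogram with a kernel density estimate; seaborn >= 0.11 replaces it with histplot/displot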
# Plot a histogram and kernel density estimate
ax1 = sns.distplot(df.Rectangle[df.Rectangle.notnull()], color=colors[0], ax=ax1)
ax1.set_xlabel('Density Range ($g/cm^3$)')
ax1.set_title('Rectangles')
ax1.set_ylim(0,8)
ax1.set_yticklabels([])
#plt.show()
# Plot a histogram and kernel density estimate
ax2 = sns.distplot(df.Square[df.Square.notnull()], color=colors[1], ax=ax2)
ax2.set_xlabel('Density Range ($g/cm^3$)')
ax2.set_title('Squares')
#plt.show()
# Plot a histogram and kernel density estimate
ax3 = sns.distplot(df.Triangle[df.Triangle.notnull()], color=colors[2], ax=ax3)
ax3.set_xlabel('Density Range ($g/cm^3$)')
ax3.set_title('Triangles')
#plt.show()
# Plot a histogram and kernel density estimate
ax4 = sns.distplot(df.Cylinder[df.Cylinder.notnull()], color=colors[3], ax=ax4)
ax4.set_xlabel('Density Range ($g/cm^3$)')
ax4.set_title('Cylinders')
#plt.show()
plt.tight_layout()
plt.show() | gpl-3.0 |
Jimmy-Morzaria/scikit-learn | sklearn/tests/test_dummy.py | 27 | 17468 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
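# Helper checks shared by the tests below: predict_proba consistency and 1d/2d target handling.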
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1/3, decimal=1)
assert_almost_equal(p[2], 1/3, decimal=1)
assert_almost_equal(p[4], 1/3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
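# sample_weight support: the fitted constants should equal the weighted mean / weighted percentiles.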
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |