def logspace(xmin,xmax,N):
    return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))

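# Illustrative usage (not from the original source; assumes numpy is
# imported as np): endpoints are included and the spacing is logarithmic.
pts = logspace(1.0, 1000.0, 4)   # -> array([    1.,    10.,   100.,  1000.])
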
def window_hanning(x):
"return x times the hanning window of len(x)"
    return np.hanning(len(x))*x

def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
        return detrend_linear(x)

def detrend_mean(x):
"Return x minus the mean(x)"
    return x - x.mean()

def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
    return y - (b*x + a)

def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
else:
y = x
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs, n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
Pxy /= (np.abs(windowVals)**2).sum()
# Also include scaling factors for one-sided densities and dividing by the
# sampling frequency, if desired. Scale everything, except the DC component
# and the NFFT/2 component:
Pxy[1:-1] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
Pxy /= Fs
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[numFreqs//2:] - Fs, freqs[:numFreqs//2]))
Pxy = np.concatenate((Pxy[numFreqs//2:, :], Pxy[:numFreqs//2, :]), 0)
    return Pxy, freqs, t

def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
    return Pxx.real,freqs

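# Usage sketch (illustrative, not from the original source; assumes numpy as
# np and the functions above in scope): estimate the spectrum of a noisy
# 100 Hz sine sampled at 1 kHz; the peak of Pxx should fall near 100 Hz.
Fs = 1000.
t = np.arange(0, 1, 1./Fs)
sig = np.sin(2*np.pi*100*t) + 0.1*np.random.randn(len(t))
Pxx, freqs = psd(sig, NFFT=256, Fs=Fs, noverlap=128)
peak_freq = freqs[np.argmax(Pxx)]   # expect a value close to 100.0
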
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
    return Pxy, freqs

def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
length segments and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
    If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned.  If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
    return Pxx, freqs, t

def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect,
since the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
    return Cxy, f

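# Illustrative sketch (assumes numpy as np): two noisy signals sharing a
# common 50 Hz component should show coherence near 1 around 50 Hz and much
# lower values elsewhere. Note len(x) must be at least 2*NFFT.
Fs = 500.
t = np.arange(0, 4, 1./Fs)
common = np.sin(2*np.pi*50*t)
x = common + 0.5*np.random.randn(len(t))
y = common + 0.5*np.random.randn(len(t))
Cxy, f = cohere(x, y, NFFT=256, Fs=Fs)
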
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Call signature::
Cxy, Phase, freqs = cohere_pairs( X, ij, ...)
Compute the coherence and phase for all pairs *ij*, in *X*.
*X* is a *numSamples* * *numCols* array
*ij* is a list of tuples. Each tuple is a pair of indexes into
the columns of X for which you want to compute coherence. For
example, if *X* has 64 columns, and you want to compute all
nonredundant pairs, define *ij* as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i,j) )
*preferSpeedOverMemory* is an optional bool. Defaults to true. If
False, limits the caching by only making one, rather than two,
complex cache arrays. This is useful if memory becomes critical.
Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
    will still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
memory than if *preferSpeedOverMemory* is True. In my tests with
a 43000,64 array over all nonredundant pairs,
*preferSpeedOverMemory* = True delivered a 33% performance boost
on a 1.7GHZ Athlon with 512MB RAM compared with
*preferSpeedOverMemory* = False. But both solutions were more
than 10x faster than naively crunching all possible pairs through
:func:`cohere`.
Returns::
(Cxy, Phase, freqs)
where:
- *Cxy*: dictionary of (*i*, *j*) tuples -> coherence vector for
      that pair.  I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
Number of dictionary keys is ``len(ij)``.
- *Phase*: dictionary of phases of the cross spectral density at
each frequency for each pair. Keys are (*i*, *j*).
- *freqs*: vector of frequencies, equal in length to either the
coherence or phase vectors for any (*i*, *j*) key.
Eg., to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If :math:`N` is the
number of pairs, this function is :math:`O(N)` for most of the
heavy lifting, whereas calling cohere for each pair is
:math:`O(N^2)`. However, because of the caching, it is also more
memory intensive, making 2 additional complex arrays with
approximately the same number of elements as *X*.
See :file:`test/cohere_pairs_test.py` in the src tree for an
example script that shows that this :func:`cohere_pairs` and
:func:`cohere` give the same results for a given pair.
.. seealso::
:func:`psd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
allColumns = set()
for i,j in ij:
allColumns.add(i); allColumns.add(j)
Ncols = len(allColumns)
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
    # cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones(NFFT, X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
normVal = np.linalg.norm(windowVals)**2
    for numCol, iCol in enumerate(allColumns):
        progressCallback(float(numCol)/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
FFTConjSlices[iCol] = np.conjugate(Slices)
Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
if numSlices>1: Pxy = np.mean(Pxy, axis=0)
#Pxy = np.divide(Pxy, normVal)
Pxy /= normVal
#Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Cxy[i,j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
Phase[i,j] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
        return Cxy, Phase, freqs

def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
    return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)

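# Quick check (illustrative; assumes numpy as np): the standard normal
# density at x = 0 is 1/sqrt(2*pi), about 0.3989.
val = normpdf(0.0, 0.0, 1.0)   # mu=0, sigma=1 -> 0.39894...
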
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
    return res

def longest_ones(x):
'''alias for longest_contiguous_ones'''
    return longest_contiguous_ones(x)

def __init__(self, a):
"""
compute the SVD of a and store data for PCA. Use project to
project the data onto a reduced set of dimensions
Inputs:
*a*: a numobservations x numdims array
Attrs:
*a* a centered unit sigma version of input a
*numrows*, *numcols*: the dimensions of a
*mu* : a numdims array of means of a
    *sigma* : a numdims array of standard deviations of a
*fracs* : the proportion of variance of each of the principal components
*Wt* : the weight vector for projecting a numdims point or array into PCA space
*Y* : a projected into PCA space
The factor loadings are in the Wt factor, ie the factor
loadings for the 1st principal component are given by Wt[0]
"""
n, m = a.shape
if n<m:
raise RuntimeError('we assume data in a is organized with numrows>numcols')
self.numrows, self.numcols = n, m
self.mu = a.mean(axis=0)
self.sigma = a.std(axis=0)
a = self.center(a)
self.a = a
U, s, Vh = np.linalg.svd(a, full_matrices=False)
Y = np.dot(Vh, a.T).T
vars = s**2/float(len(s))
self.fracs = vars/vars.sum()
self.Wt = Vh
    self.Y = Y

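# Usage sketch (illustrative; assumes this __init__ belongs to the PCA class
# of this module and that numpy is imported as np): decompose 100
# observations of 3 correlated variables.
obs = np.random.randn(100, 3)
obs[:, 2] = obs[:, 0] + 0.1*obs[:, 1]   # make one column a near-copy
pca = PCA(obs)
# pca.fracs sums to 1; most variance should land in the first component.
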
def center(self, x):
'center the data using the mean and sigma from training set a'
    return (x - self.mu)/self.sigma

def _get_colinear():
c0 = np.array([
0.19294738, 0.6202667 , 0.45962655, 0.07608613, 0.135818 ,
0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
0.837437 , 0.10824303, 0.1723387 , 0.43926494, 0.83705486])
c1 = np.array([
-1.17705601, -0.513883 , -0.26614584, 0.88067144, 1.00474954,
-1.1616545 , 0.0266109 , 0.38227157, 1.80489433, 0.21472396,
-1.41920399, -2.08158544, -0.10559009, 1.68999268, 0.34847107,
-0.4685737 , 1.23980423, -0.14638744, -0.35907697, 0.22442616])
c2 = c0 + 2*c1
c3 = -3*c0 + 4*c1
a = np.array([c3, c0, c1, c2]).T
    return a

def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
    return a + (b - a)*fraction

def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
    return np.searchsorted(ptiles, x)

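# Illustrative example (assumes numpy as np and a prctile function in scope,
# as referenced above): quartile membership for ten values.
vals = np.arange(10.0)
ranks = prctile_rank(vals, 4)   # entries in {0, 1, 2, 3}, one per quartile
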
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
    return yout

def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
    return X[Cond], Y[Cond], Z[Cond]

def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
    return np.sqrt(np.dot(d,d))

def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
    return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0

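# Illustrative check: the diagonals of the unit square cross; two parallel
# horizontal segments do not. Floats avoid any integer-division surprises.
segments_intersect(((0., 0.), (1., 1.)), ((0., 1.), (1., 0.)))   # True
segments_intersect(((0., 0.), (1., 0.)), ((0., 1.), (1., 1.)))   # False
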
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
This function will be removed from matplotlib.
Returns :
.. math::
        \\lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Lyapunov Exponent
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
warnings.warn("This does not belong in matplotlib and will be removed", DeprecationWarning) # 2009/06/13
    return np.mean(np.log(np.absolute(fprime(x))))

def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
    self.callbackd = {}

def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xy = np.asarray([(x,y),])
self.dataLim.update_from_data_xy(xy, None)
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
    self._ind += 1

def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
    return self._xa, self._ya

def movavg(x,n):
"""
    Compute the length *n* moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
    return np.convolve(x, w, mode='valid')

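# Illustrative example (assumes numpy as np): in 'valid' mode the output is
# shorter than the input by n-1 samples.
movavg(np.array([1., 2., 3., 4., 5.]), 3)   # -> array([ 2.,  3.,  4.])
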
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
Deprecated: use numpy.loadtxt.
The data must be regular, same number of values in every row
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree
Exercises many of these options.
"""
warnings.warn("use numpy.loadtxt", DeprecationWarning) # 2009/06/13
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
    else: return X

def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
        return math.exp(x)

def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
    return np.sqrt(np.mean(np.absolute(a)**2))

def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
    return np.sqrt(np.sum(np.absolute(a)**2))

def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
      array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    if xfin is None:
        xfin = xini + 0.0
        xini = 0.0
    if delta is None:
        delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
    return np.arange(npts)*delta+xini

def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
    return iden

def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
    if not digits.count (1): return '0'
digits = digits [digits.index (1):]
    return ''.join (map (repr, digits)).replace('L','')

def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
        return len(bin_n)

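# Illustrative checks for the two helpers above (Python 2 semantics, where
# map returns a list):
binary_repr(10)   # -> '1010'
ispower2(8)       # -> 3, since 2**3 == 8
ispower2(6)       # -> 0, not a power of two
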
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
    else: return b

def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
    return newrec

def rec_keep_fields(rec, names):
"""
Return a new numpy record array with only fields listed in names
"""
if cbook.is_string_like(names):
names = names.split(',')
arrays = []
for name in names:
arrays.append(rec[name])
    return np.rec.fromarrays(arrays, names=names)

def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
    return np.rec.fromarrays(arrays, names=names)

def makekey(row):
    return tuple([row[name] for name in key])

def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
    dt2 = r2.dtype[name]
assert dt2==dt1
if dt1.num>dt2.num:
return (name, dt1.descr[0][1])
else:
        return (name, dt2.descr[0][1])

def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
    else: return name + r1postfix

def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
"""
Join a sequence of record arrays on single column key.
This function only joins a single column of the multiple record arrays
*key*
is the column name that acts as a key
*name*
is the name of the column that we want to join
*recs*
is a list of record arrays to join
*jointype*
is a string 'inner' or 'outer'
*missing*
is what any missing field is replaced by
*postfixes*
if not None, a len recs sequence of postfixes
returns a record array with columns [rowkey, name0, name1, ... namen-1].
or if postfixes [PF0, PF1, ..., PFN-1] are supplied,
[rowkey, namePF0, namePF1, ... namePFN-1].
Example::
r = recs_join("date", "close", recs=[r0, r1], missing=0.)
"""
results = []
aligned_iters = cbook.align_iterators(operator.attrgetter(key), *[iter(r) for r in recs])
def extract(r):
if r is None: return missing
else: return r[name]
if jointype == "outer":
for rowkey, row in aligned_iters:
results.append([rowkey] + map(extract, row))
elif jointype == "inner":
for rowkey, row in aligned_iters:
if None not in row: # throw out any Nones
results.append([rowkey] + map(extract, row))
if postfixes is None:
postfixes = ['%d'%i for i in range(len(recs))]
names = ",".join([key] + ["%s%s" % (name, postfix) for postfix in postfixes])
    return np.rec.fromrecords(results, names=names)

def __init__(self, fh):
    self.fh = fh

def seek(self, arg):
    self.fh.seek(arg)

def next(self):
    return self.fix(self.fh.next())

def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
    return fh, reader

def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
        return False

def newfunc(name, val):
if ismissing(name, val):
return default
else:
        return func(val)

def mybool(x):
if x=='True': return True
elif x=='False': return False
    else: raise ValueError('invalid bool')

def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
    return d.date()

def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
    else: return func

def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
    return converters

def tostr(self, x):
    return self.toval(x)

def fromstr(self, s):
    return s

def tostr(self, x):
val = repr(x)
    return val[1:-1]

def __init__(self, fmt):
    self.fmt = fmt

def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
    self.scale = scale

def toval(self, x):
if x is not None:
x = x * self.scale
    return x

def tostr(self, x):
    return '%d'%int(x)

def fromstr(self, s):
    return int(s)

def toval(self, x):
    return str(x)

def __init__(self, precision=4):
    FormatFloat.__init__(self, precision, scale=100.)

def __init__(self, precision=4):
    FormatFloat.__init__(self, precision, scale=1e-3)

def __init__(self, precision=4):
    FormatFloat.__init__(self, precision, scale=1e-6)

def __init__(self, fmt):
    self.fmt = fmt

def toval(self, x):
if x is None: return 'None'
    return x.strftime(self.fmt)

def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
    FormatDate.__init__(self, fmt)

def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
    return formatd

def rec2txt(r, header=None, padding=3, precision=3, fields=None):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
*fields* : if not None, a list of field names to print. fields
can be a list of strings like ['field1', 'field2'] or a single
comma separated string like 'field1,field2'
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if fields is not None:
r = rec_keep_fields(r, fields)
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
    return text

def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
    return newfunc

def griddata(x,y,z,xi,yi,interp='nn'):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside convex
hull defined by input data (no extrapolation is done).
If interp keyword is set to '`nn`' (default),
uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_toolkits.natgrid`) has been created that provides a more
    robust algorithm for triangulation and interpolation.  This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
If the interp keyword is set to '`linear`', then linear interpolation
is used instead of natural neighbor. In this case, the output grid
is assumed to be regular with a constant grid spacing in both the x and
y directions. For regular grids with nonconstant grid spacing, you
must use natural neighbor interpolation. Linear interpolation is only valid if
    :mod:`matplotlib.delaunay` package is used - :mod:`mpl_toolkits.natgrid`
only provides natural neighbor interpolation.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
# make sure mask is not a scalar boolean array.
if z.mask.ndim:
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if interp != 'nn':
raise ValueError("only natural neighor interpolation"
" allowed when using natgrid toolkit in griddata.")
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
if interp == 'nn':
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
elif interp == 'linear':
# make sure grid has constant dx, dy
dx = xi[0,1:]-xi[0,0:-1]
dy = yi[1:,0]-yi[0:-1,0]
epsx = np.finfo(xi.dtype).resolution
epsy = np.finfo(yi.dtype).resolution
if dx.max()-dx.min() > epsx or dy.max()-dy.min() > epsy:
raise ValueError("output grid must have constant spacing"
" when using interp='linear'")
interp = tri.linear_interpolator(z)
zo = interp[yi.min():yi.max():complex(0,yi.shape[0]),
xi.min():xi.max():complex(0,xi.shape[1])]
else:
raise ValueError("interp keyword must be one of"
" 'linear' (for linear interpolation) or 'nn'"
" (for natural neighbor interpolation). Default is 'nn'.")
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
    return zo

def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of points
    between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
    return yi

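# Illustrative example (assumes numpy as np): interpolate midway between
# samples; with extrap=False, points outside [x[0], x[-1]] remain NaN.
less_simple_linear_interpolation([0., 1., 2.], [0., 10., 20.], [0.5, 1.5])
# -> array([  5.,  15.])
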
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
not an academic journal but once in a while something serious
and original comes in adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
    Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
    return yi

def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
    return res

def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
    return x,y

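# Illustrative sketch (assumes numpy as np): polygon bounded below by y = 0
# and above by a curve, in the form expected by Axes.fill.
xv = np.linspace(0, 1, 5)
xs, ys = poly_between(xv, 0, xv**2)   # len(xs) == len(ys) == 2*len(xv)
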
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
    return boundaries

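# Illustrative example: half-open (ind0, ind1) pairs, one per run of True.
contiguous_regions([False, True, True, False, True])   # -> [(1, 3), (4, 5)]
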
def cross_from_above(x, threshold):
"""
    return the indices into *x* where *x* crosses some threshold from
    above, eg the i's where::
      x[i-1] >= threshold and x[i] < threshold
.. seealso::
:func:`cross_from_below` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero( (x[:-1]>=threshold) & (x[1:]<threshold))[0]
if len(ind): return ind+1
    else: return ind

def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
    return (np.sum(X**(P),axis=axis))**(1./P)

def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
    return np.concatenate( (np.zeros(1), np.cumsum(X)) )

def map_type(self, obj):
# TODO: Replace all str with unicode when done in property.default attribute
# TODO: Fix ToGuessProp as it may be a list.
if isinstance(obj, ListProp):
return list
if isinstance(obj, StringProp):
return str
if isinstance(obj, UnusedProp):
return str
if isinstance(obj, BoolProp):
return bool
if isinstance(obj, IntegerProp):
return int
if isinstance(obj, FloatProp):
return float
if isinstance(obj, CharProp):
return str
if isinstance(obj, DictProp):
return dict
if isinstance(obj, AddrProp):
return str
if isinstance(obj, ToGuessProp):
        return str

def add(self, b):
if isinstance(b, Brok):
self.broks[b.id] = b
return
if isinstance(b, ExternalCommand):
        self.sched.run_external_command(b.cmd_line)

def get(self):
self.response.out.write("Test 1:" +self.test1() +"<br>")
self.response.out.write("Test 2:" + self.test2() +"<br>")
self.response.out.write("Test 3:" + self.test3() +"<br>")
    self.response.out.write("Test 4:" + self.test4() +"<br>")

def test1(self):
key = "[email protected]"
ent_type = "Accounts"
trophy_case_widget = TrophyCase(key_name=key)
points_widget = Points(key_name=key)
rank_widget = Rank(key_name=key)
newacc = Accounts(key_name=key,
password="aaa",
email=key,
isEnabled="enabled",
accountType="bronze",
paymentType="free",
cookieKey="xxx",
apiKey="xxx",
trophyWidget=trophy_case_widget,
pointsWidget=points_widget,
rankWidget=rank_widget)
try:
memcache_db.delete_entity(newacc, key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, key)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.email != key:
return "Error getting same account. Subtest 1"
# purge from memcache and get from db
memcache.delete(key=key, namespace=ent_type)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.email != key:
return "Error getting same account from DB (no cache). Subtest 2"
# Set and get new user name
diction = {"email":"[email protected]"}
ret2 = memcache_db.update_fields(key, ent_type, diction)
ret2 = sameent.put()
if ret != ret2:
self.response.out.write("Error getting key name")
sameent = memcache_db.get_entity(key, ent_type)
if sameent.email != "[email protected]":
return "Error getting same account after altering entity. Subtest 3"
try:
memcache_db.delete_entity(newacc, key)
except Exception:
return "Error deleting entity. Subtest 4" | nlake44/UserInfuser | [
101,
54,
101,
3,
1311320106
] |
def test2(self):
account_key = "raj"
trophy_case_widget = TrophyCase(key_name=account_key)
points_widget = Points(key_name=account_key)
rank_widget = Rank(key_name=account_key)
newacc = Accounts(key_name=account_key,
password="aaa",
email="[email protected]",
isEnabled="enabled",
accountType="bronze",
paymentType="free",
apiKey="xxx",
cookieKey="xxx",
trophyWidget=trophy_case_widget,
pointsWidget=points_widget,
rankWidget=rank_widget)
try:
        memcache_db.delete_entity(newacc, account_key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, account_key)
key = "testbadge1"
ent_type = "Badges"
newacc = Badges(key_name=key,
name="badge1",
description=key,
altText="a really cool badge",
setType="free",
isEnabled="yes",
creator=newacc,
permissions="private",
blobKey="xxxx",
storageType="blob")
try:
memcache_db.delete_entity(newacc, key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, key)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.description != key:
return "Error getting same account. Subtest 1"
# purge from memcache and get from db
memcache.delete(key=key, namespace=ent_type)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.description != key:
return "Error getting same account from DB (no cache). Subtest 2"
# Set and get new user name
diction = {"isEnabled":"no", "permissions":"public"}
ret2 = memcache_db.update_fields(key, ent_type, diction)
ret2 = sameent.put()
if ret != ret2:
self.response.out.write("Error getting key name")
sameent = memcache_db.get_entity(key, ent_type)
if sameent.isEnabled != "no" or sameent.permissions != "public":
return "Error getting same account after altering entity. Subtest 3"
try:
memcache_db.delete_entity(sameent, key)
except Exception:
return "Error deleting entity. Subtest 4"
try:
memcache_db.delete_entity(newacc, account_key)
except Exception:
return "Error deleting account. Subtest 5"
return "Success" | nlake44/UserInfuser | [
101,
54,
101,
3,
1311320106
] |
def test3(self):
account_key = "[email protected]"
trophy_case_widget = TrophyCase(key_name=account_key)
points_widget = Points(key_name=account_key)
rank_widget = Rank(key_name=account_key)
newacc = Accounts(key_name=account_key,
password="aaa",
email="[email protected]",
isEnabled="enabled",
accountType="bronze",
paymentType="free",
apiKey="xxx",
cookieKey="xxx",
trophyWidget=trophy_case_widget,
pointsWidget=points_widget,
rankWidget=rank_widget)
try:
memcache_db.delete_entity(newacc, account_key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, account_key)
key = "testuser1"
ent_type = "Users"
newacc = Users(key_name=key,
userid=key,
isEnabled="yes",
accountRef=newacc,
tags = key)
try:
memcache_db.delete_entity(newacc, key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, key)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.tags != key:
return "Error getting same entity. Subtest 1"
# purge from memcache and get from db
memcache.delete(key=key, namespace=ent_type)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.tags != key:
return "Error getting same entity from DB (no cache). Subtest 2"
# Set and get new user name
diction = {"tags":"goodbye:hello"}
ret2 = memcache_db.update_fields(key, ent_type, diction)
ret2 = sameent.put()
if ret != ret2:
self.response.out.write("Error getting key name")
sameent = memcache_db.get_entity(key, ent_type)
if sameent.tags != "goodbye:hello":
return "Error getting same entity after altering entity. Subtest 3"
try:
memcache_db.delete_entity(newacc, account_key)
memcache_db.delete_entity(sameent, key)
except Exception:
return "Error deleting entity. Subtest 4" | nlake44/UserInfuser | [
101,
54,
101,
3,
1311320106
] |
def test4(self):
account_key = "[email protected]"
trophy_case_widget = TrophyCase(key_name=account_key)
points_widget = Points(key_name=account_key)
rank_widget = Rank(key_name=account_key)
newacc = Accounts(key_name=account_key,
password="aaa",
email="[email protected]",
isEnabled="enabled",
accountType="bronze",
paymentType="free",
apiKey="xxx",
cookieKey="xxx",
trophyWidget=trophy_case_widget,
pointsWidget=points_widget,
rankWidget=rank_widget)
try:
        memcache_db.delete_entity(newacc, account_key)
except Exception:
pass
# Save an account
ret = memcache_db.save_entity(newacc, account_key)
user_key = "testuser1"
newuser = Users(key_name=user_key,
userid=user_key,
isEnabled="yes",
accountRef=newacc,
tags = user_key)
try:
        memcache_db.delete_entity(newuser, user_key)
except Exception:
pass
# Save a user
    ret = memcache_db.save_entity(newuser, user_key)
# Create a Badge Type
badge_key = "testbadge1"
badgetype = Badges(key_name=badge_key,
name="badge1",
description=badge_key,
altText="a really cool badge",
setType="free",
isEnabled="yes",
creator=newacc,
permissions="private",
storageType="blob",
blobKey="xxxx")
try:
memcache_db.delete_entity(badgetype, badge_key)
except Exception:
pass
# Save and get saved ent
    ret = memcache_db.save_entity(badgetype, badge_key)

def get(self):
""" Add to the db, get, and delete """ | nlake44/UserInfuser | [
101,
54,
101,
3,
1311320106
] |
def get(self):
from serverside.tools import encryption
"""Do some simple encryption and show results """
mystr = "hello, world"
self.response.out.write("encrypt string: " + mystr + "<br/>")
mystr_enc = encryption.des_encrypt_str("hello, world")
self.response.out.write("encrypted: " + mystr_enc + "<br/>")
mystr_dec = encryption.des_decrypt_str(mystr_enc)
    self.response.out.write("decrypted: " + mystr_dec + "<br/>")

def get(self):
print "OS: " + os.environ["SERVER_SOFTWARE"]
    self.response.out.write("OS server software: " + os.environ["SERVER_SOFTWARE"])

def post(self):
    pass

def get(self):
    self.response.out.write("Creating session and setting cookie")

def get(self):
    self.response.out.write("<br/>If you reached here you are logged in!")

def get(self):
self.response.out.write("terminating the follow session:")
sess = Session().get_current_session(self)
    if sess is None:
self.response.out.write("<br/>You are not logged in!!")
else:
self.response.out.write("<br/>You are logged in as:")
email = sess.get_email()
self.response.out.write("<br/>" + email)
        sess.terminate()

def get(self):
self.response.out.write("You should be able to see this page, logged in or not...")
sess = Session().get_current_session(self)
    if sess is None:
self.response.out.write("<br/>You are not logged in!!")
else:
self.response.out.write("<br/>You are logged in as:")
email = sess.get_email()
        self.response.out.write("<br/>" + email)

def get(self):
log1 = {"account":"[email protected]",
'event':'getuserdata',
'api': 'get_user_data',
'is_api':'yes',
'user':"test_user",
'success':'true',
'ip':'127.0.0.1'}
log1["details"] = u"HELLO 0"
logs.create(log1)
log1["is_api"] = 'no'
log1["details"] = u"HELLO 1"
logs.create(log1)
log1["is_api"] = 'yes'
log1["details"] = u"HELLO 2"
log1["points"] = 100
logs.create(log1)
log1["details"] = u"A BUNCH OF accent e's \xe9\xe9\xe9"
    logs.create(log1)

def get(self):
q = Logs.all()
q.filter("account = ", "[email protected]")
ents = q.fetch(10)
count = 0
for ii in ents:
count += 1
self.response.out.write(ii.details)
self.response.out.write("<br/>")
    self.response.out.write("Number fetched " + str(count))

def get(self):
    pass

def get(self):
    pass