repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
akrherz/iem
|
scripts/season/plot_cli_jul1_snow.py
|
1
|
2356
|
"""Plot CLI snow"""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot import MapPlot
from pyiem.util import get_dbconn
def main():
"""Go Main Go"""
pgconn = get_dbconn("iem", user="nobody")
df = read_sql(
"""
select station, st_x(geom), st_y(geom), snow_jul1, snow_jul1_normal
from cli_data c JOIN stations t on (t.id = c.station)
WHERE c.valid = 'YESTERDAY' and t.network = 'NWSCLI'
and snow_jul1 is not null and snow_jul1_normal is not null
and t.id not in ('RAP', 'DVN', 'FGF', 'OAX', 'MPX')
""",
pgconn,
index_col="station",
)
df["departure"] = df["snow_jul1"] - df["snow_jul1_normal"]
df["colors"] = df["departure"].apply(
lambda x: "#ff0000" if x < 0 else "#0000ff"
)
yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
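# The snowfall season starts 1 July, so before July the season year is the
# previous calendar year.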
year = yesterday.year if yesterday.month > 6 else yesterday.year - 1
mp = MapPlot(
sector="midwest",
axisbg="white",
title="NWS Total Snowfall (inches) thru %s"
% (yesterday.strftime("%-d %B %Y"),),
subtitle=("1 July %s - %s")
% (year, datetime.datetime.today().strftime("%-d %B %Y")),
)
mp.plot_values(
df["st_x"].values,
df["st_y"].values,
df["snow_jul1"].values,
fmt="%.1f",
labelbuffer=5,
)
pqstr = (
"data ac %s0000 summary/mw_season_snowfall.png "
"mw_season_snowfall.png png"
) % (datetime.datetime.today().strftime("%Y%m%d"),)
mp.postprocess(view=False, pqstr=pqstr)
mp.close()
# Departure
mp = MapPlot(
sector="midwest",
axisbg="white",
title="NWS Total Snowfall Departure (inches) thru %s"
% (yesterday.strftime("%-d %B %Y"),),
subtitle=("1 July %s - %s")
% (year, datetime.datetime.today().strftime("%-d %B %Y")),
)
mp.plot_values(
df["st_x"].values,
df["st_y"].values,
df["departure"].values,
color=df["colors"].values,
fmt="%.1f",
labelbuffer=5,
)
pqstr = (
"data ac %s0000 summary/mw_season_snowfall_departure.png "
"mw_season_snowfall_departure.png png"
) % (datetime.datetime.today().strftime("%Y%m%d"),)
mp.postprocess(view=False, pqstr=pqstr)
mp.close()
if __name__ == "__main__":
main()
|
mit
|
massmutual/scikit-learn
|
examples/cluster/plot_kmeans_digits.py
|
230
|
4524
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, x_max] x [y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with a white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
FabriceSalvaire/PySpice
|
examples/relay/relay.py
|
1
|
2550
|
#r# =====================================
#r# Relay driven by a bipolar transistor
#r# =====================================
#r# This example shows the simulation of ...
####################################################################################################
import matplotlib.pyplot as plt
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Doc.ExampleTools import find_libraries
from PySpice.Probe.Plot import plot
from PySpice.Spice.Library import SpiceLibrary
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit import *
####################################################################################################
libraries_path = find_libraries()
spice_library = SpiceLibrary(libraries_path)
####################################################################################################
#?# #cm# relay.m4
period = 50@u_ms
pulse_width = period / 2
circuit = Circuit('Relay')
# circuit.V('digital', 'Vdigital', circuit.gnd, 5@u_V)
circuit.PulseVoltageSource('clock', 'clock', circuit.gnd, 0@u_V, 5@u_V, pulse_width, period, rise_time=5@u_ms, fall_time=5@u_ms)
circuit.R('base', 'clock', 'base', 100@u_Ω)
circuit.BJT(1, 'collector', 'base', circuit.gnd, model='bjt') # Q is mapped to BJT !
circuit.model('bjt', 'npn', bf=80, cjc=pico(5), rb=100)
circuit.V('analog', 'VccAnalog', circuit.gnd, 8@u_V)
circuit.R('relay', 'VccAnalog', 1, 50@u_Ω)
circuit.L('relay', 1, 'collector', 100@u_mH)
circuit.include(spice_library['1N5822']) # Schottky diode
diode = circuit.X('D', '1N5822', 'collector', 'VccAnalog')
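# The 1N5822 across the relay branch acts as a freewheeling (flyback) clamp:
# when the transistor turns off, it gives the coil current a return path to
# VccAnalog and bounds the voltage spike at the collector.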
# Fixme: subcircuit node
# diode.minus.add_current_probe(circuit)
####################################################################################################
figure, ax = plt.subplots(figsize=(20, 10))
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.transient(step_time=period/1000, end_time=period*1.1)
ax.set_title('')
ax.set_xlabel('Time [s]')
ax.set_ylabel('Voltage [V]')
ax.grid()
ax.plot(analysis.base)
ax.plot(analysis.collector)
# Fixme: current probe
# ax.plot((analysis['1'] - analysis.collector)/circuit.Rrelay.resistance)
ax.plot(analysis['1'] - analysis.collector)
ax.legend(('Vbase', 'Vcollector'), loc=(.05,.1))
plt.tight_layout()
plt.show()
#f# save_figure('figure', 'relay.png')
|
gpl-3.0
|
numpy/datetime
|
numpy/fft/fftpack.py
|
59
|
39653
|
"""
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for the largest positive frequency, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a,s,axes,fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a,s,axes,fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
|
bsd-3-clause
|
massmutual/scikit-learn
|
examples/cluster/plot_agglomerative_clustering_metrics.py
|
402
|
4492
|
"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. Because the cosine
distance is invariant to a scaling of the data, it cannot distinguish these two
waveforms (a short check of this claim follows the imports below). Thus, even
with no noise, clustering using this distance will not separate waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than its
l2 norm (the "euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of each class, are much larger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
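# A minimal check (not part of the original example) of the docstring's claim
# that the cosine distance is invariant to a scaling of the data while the
# euclidean distance is not; the underscore-prefixed name is ours.
_v = np.array([[1.0, 2.0, 3.0]])
assert pairwise_distances(_v, 3 * _v, metric="cosine").max() < 1e-12
assert pairwise_distances(_v, 3 * _v, metric="euclidean").max() > 0.0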
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
|
bsd-3-clause
|
ltiao/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
230
|
7880
|
"""This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
cjayb/mne-python
|
mne/stats/tests/test_cluster_level.py
|
2
|
28525
|
# Authors: Eric Larson <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from functools import partial
import os
import numpy as np
from scipy import sparse, linalg, stats
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
import pytest
from mne.fixes import has_numba
from mne.parallel import _force_serial
from mne.stats import cluster_level, ttest_ind_no_p, combine_adjacency
from mne.stats.cluster_level import (permutation_cluster_test, f_oneway,
permutation_cluster_1samp_test,
spatio_temporal_cluster_test,
spatio_temporal_cluster_1samp_test,
ttest_1samp_no_p, summarize_clusters_stc)
from mne.utils import (run_tests_if_main, catch_logging, check_version,
requires_sklearn)
@pytest.fixture(scope="function", params=('Numba', 'NumPy'))
def numba_conditional(monkeypatch, request):
"""Test both code paths on machines that have Numba."""
assert request.param in ('Numba', 'NumPy')
if request.param == 'NumPy' and has_numba:
monkeypatch.setattr(
cluster_level, '_get_buddies', cluster_level._get_buddies_fallback)
monkeypatch.setattr(
cluster_level, '_get_selves', cluster_level._get_selves_fallback)
monkeypatch.setattr(
cluster_level, '_where_first', cluster_level._where_first_fallback)
if request.param == 'Numba' and not has_numba:
pytest.skip('Numba not installed')
yield request.param
n_space = 50
def _get_conditions():
noise_level = 20
n_time_1 = 20
n_time_2 = 13
normfactor = np.hanning(20).sum()
rng = np.random.RandomState(42)
condition1_1d = rng.randn(n_time_1, n_space) * noise_level
for c in condition1_1d:
c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
condition2_1d = rng.randn(n_time_2, n_space) * noise_level
for c in condition2_1d:
c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
pseudoekp = 10 * np.hanning(25)[None, :]
condition1_1d[:, 25:] += pseudoekp
condition2_1d[:, 25:] -= pseudoekp
condition1_2d = condition1_1d[:, :, np.newaxis]
condition2_2d = condition2_1d[:, :, np.newaxis]
return condition1_1d, condition2_1d, condition1_2d, condition2_2d
def test_thresholds(numba_conditional):
"""Test automatic threshold calculations."""
# within subjects
rng = np.random.RandomState(0)
X = rng.randn(10, 1, 1) + 0.08
want_thresh = -stats.t.ppf(0.025, len(X) - 1)
assert 0.03 < stats.ttest_1samp(X[:, 0, 0], 0)[1] < 0.05
my_fun = partial(ttest_1samp_no_p)
with catch_logging() as log:
with pytest.warns(RuntimeWarning, match='threshold is only valid'):
out = permutation_cluster_1samp_test(
X, stat_fun=my_fun, seed=0, verbose=True, out_type='mask')
log = log.getvalue()
assert str(want_thresh)[:6] in log
assert len(out[1]) == 1 # 1 cluster
assert_allclose(out[2], 0.033203, atol=1e-6)
# between subjects
Y = rng.randn(10, 1, 1)
Z = rng.randn(10, 1, 1) - 0.7
X = [X, Y, Z]
want_thresh = stats.f.ppf(1. - 0.05, 2, sum(len(a) for a in X) - len(X))
p = stats.f_oneway(*X)[1]
assert 0.03 < p < 0.05
my_fun = partial(f_oneway) # just to make the check fail
with catch_logging() as log:
with pytest.warns(RuntimeWarning, match='threshold is only valid'):
out = permutation_cluster_test(X, tail=1, stat_fun=my_fun,
seed=0, verbose=True,
out_type='mask')
log = log.getvalue()
assert str(want_thresh)[:6] in log
assert len(out[1]) == 1 # 1 cluster
assert_allclose(out[2], 0.041992, atol=1e-6)
with pytest.warns(RuntimeWarning, match='Ignoring argument "tail"'):
permutation_cluster_test(X, tail=0, out_type='mask')
# nan handling in TFCE
X = np.repeat(X[0], 2, axis=1)
X[:, 1] = 0
with pytest.warns(RuntimeWarning, match='invalid value'): # NumPy
out = permutation_cluster_1samp_test(
X, seed=0, threshold=dict(start=0, step=0.1), out_type='mask')
assert (out[2] < 0.05).any()
assert not (out[2] < 0.05).all()
X[:, 0] = 0
with pytest.raises(RuntimeError, match='finite'):
with np.errstate(invalid='ignore'):
permutation_cluster_1samp_test(
X, seed=0, threshold=dict(start=0, step=0.1),
buffer_size=None, out_type='mask')
def test_cache_dir(tmpdir, numba_conditional):
"""Test use of cache dir."""
tempdir = str(tmpdir)
orig_dir = os.getenv('MNE_CACHE_DIR', None)
orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None)
rng = np.random.RandomState(0)
X = rng.randn(9, 2, 10)
try:
os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K'
os.environ['MNE_CACHE_DIR'] = tempdir
# Fix error for #1507: in-place when memmapping
with catch_logging() as log_file:
permutation_cluster_1samp_test(
X, buffer_size=None, n_jobs=2, n_permutations=1, seed=0,
stat_fun=ttest_1samp_no_p, verbose=False, out_type='mask')
assert 'independently' not in log_file.getvalue()
# ensure that non-independence yields warning
stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
if check_version('numpy', '1.17'):
random_state = np.random.default_rng(0)
else:
random_state = 0
with pytest.warns(RuntimeWarning, match='independently'):
permutation_cluster_1samp_test(
X, buffer_size=10, n_jobs=2, n_permutations=1,
seed=random_state, stat_fun=stat_fun, verbose=False,
out_type='mask')
finally:
if orig_dir is not None:
os.environ['MNE_CACHE_DIR'] = orig_dir
else:
del os.environ['MNE_CACHE_DIR']
if orig_size is not None:
os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size
else:
del os.environ['MNE_MEMMAP_MIN_SIZE']
def test_permutation_large_n_samples(numba_conditional):
"""Test that non-replacement works with large N."""
X = np.random.RandomState(0).randn(72, 1) + 1
for n_samples in (11, 72):
tails = (0, 1) if n_samples <= 20 else (0,)
for tail in tails:
H0 = permutation_cluster_1samp_test(
X[:n_samples], threshold=1e-4, tail=tail, out_type='mask')[-1]
assert H0.shape == (1024,)
assert len(np.unique(H0)) >= 1024 - (H0 == 0).sum()
def test_permutation_step_down_p(numba_conditional):
"""Test cluster level permutations with step_down_p."""
rng = np.random.RandomState(0)
# subjects, time points, spatial points
X = rng.randn(9, 2, 10)
# add some significant points
X[:, 0:2, 0:2] += 2 # span two time points and two spatial points
X[:, 1, 5:9] += 0.5 # span four time points with 4x smaller amplitude
thresh = 2
# make sure it works when we use ALL points in step-down
t, clusters, p, H0 = \
permutation_cluster_1samp_test(X, threshold=thresh,
step_down_p=1.0, out_type='mask')
# make sure using step-down will actually yield improvements sometimes
t, clusters, p_old, H0 = \
permutation_cluster_1samp_test(X, threshold=thresh,
step_down_p=0.0, out_type='mask')
assert_equal(np.sum(p_old < 0.05), 1) # just spatial cluster
p_min = np.min(p_old)
assert_allclose(p_min, 0.003906, atol=1e-6)
t, clusters, p_new, H0 = \
permutation_cluster_1samp_test(X, threshold=thresh,
step_down_p=0.05, out_type='mask')
assert_equal(np.sum(p_new < 0.05), 2) # time one rescued
assert np.all(p_old >= p_new)
p_next = p_new[(p_new > 0.004) & (p_new < 0.05)][0]
assert_allclose(p_next, 0.015625, atol=1e-6)
def test_cluster_permutation_test(numba_conditional):
"""Test cluster level permutations tests."""
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
for condition1, condition2 in zip((condition1_1d, condition1_2d),
(condition2_1d, condition2_2d)):
T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
[condition1, condition2], n_permutations=100, tail=1, seed=1,
buffer_size=None, out_type='mask')
p_min = np.min(cluster_p_values)
assert_equal(np.sum(cluster_p_values < 0.05), 1)
assert_allclose(p_min, 0.01, atol=1e-6)
# test with 2 jobs and buffer_size enabled
buffer_size = condition1.shape[1] // 10
T_obs, clusters, cluster_p_values_buff, hist =\
permutation_cluster_test([condition1, condition2],
n_permutations=100, tail=1, seed=1,
n_jobs=2, buffer_size=buffer_size,
out_type='mask')
assert_array_equal(cluster_p_values, cluster_p_values_buff)
# test param deprecation
with pytest.deprecated_call():
_ = permutation_cluster_test(
[condition1_1d, condition2_1d], n_permutations=10, out_type=None)
def stat_fun(X, Y):
return stats.f_oneway(X, Y)[0]
with pytest.warns(RuntimeWarning, match='is only valid'):
permutation_cluster_test([condition1, condition2], n_permutations=1,
stat_fun=stat_fun, out_type='mask')
@pytest.mark.parametrize('stat_fun', [
ttest_1samp_no_p,
partial(ttest_1samp_no_p, sigma=1e-1)
])
def test_cluster_permutation_t_test(numba_conditional, stat_fun):
"""Test cluster level permutations T-test."""
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
# use a very large sigma to make sure Ts are not independent
for condition1, p in ((condition1_1d, 0.01),
(condition1_2d, 0.01)):
# these are so significant we can get away with fewer perms
T_obs, clusters, cluster_p_values, hist =\
permutation_cluster_1samp_test(condition1, n_permutations=100,
tail=0, seed=1, out_type='mask',
buffer_size=None)
assert_equal(np.sum(cluster_p_values < 0.05), 1)
p_min = np.min(cluster_p_values)
assert_allclose(p_min, p, atol=1e-6)
T_obs_pos, c_1, cluster_p_values_pos, _ =\
permutation_cluster_1samp_test(condition1, n_permutations=100,
tail=1, threshold=1.67, seed=1,
stat_fun=stat_fun, out_type='mask',
buffer_size=None)
T_obs_neg, _, cluster_p_values_neg, _ =\
permutation_cluster_1samp_test(-condition1, n_permutations=100,
tail=-1, threshold=-1.67,
seed=1, stat_fun=stat_fun,
buffer_size=None, out_type='mask')
assert_array_equal(T_obs_pos, -T_obs_neg)
assert_array_equal(cluster_p_values_pos < 0.05,
cluster_p_values_neg < 0.05)
# test with 2 jobs and buffer_size enabled
buffer_size = condition1.shape[1] // 10
with pytest.warns(None): # sometimes "independently"
T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \
permutation_cluster_1samp_test(
-condition1, n_permutations=100, tail=-1, out_type='mask',
threshold=-1.67, seed=1, n_jobs=2, stat_fun=stat_fun,
buffer_size=buffer_size)
assert_array_equal(T_obs_neg, T_obs_neg_buff)
assert_array_equal(cluster_p_values_neg, cluster_p_values_neg_buff)
# Bad stat_fun
with pytest.raises(TypeError, match='must be .* ndarray'):
permutation_cluster_1samp_test(
condition1, threshold=1, stat_fun=lambda x: None,
out_type='mask')
with pytest.raises(ValueError, match='not compatible'):
permutation_cluster_1samp_test(
condition1, threshold=1, stat_fun=lambda x: stat_fun(x)[:-1],
out_type='mask')
@requires_sklearn
def test_cluster_permutation_with_adjacency(numba_conditional):
"""Test cluster level permutations with adjacency matrix."""
from sklearn.feature_extraction.image import grid_to_graph
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
n_pts = condition1_1d.shape[1]
# we don't care about p-values in any of these, so do fewer permutations
args = dict(seed=None, max_step=1, exclude=None, out_type='mask',
step_down_p=0, t_power=1, threshold=1.67,
check_disjoint=False, n_permutations=50)
did_warn = False
for X1d, X2d, func, spatio_temporal_func in \
[(condition1_1d, condition1_2d,
permutation_cluster_1samp_test,
spatio_temporal_cluster_1samp_test),
([condition1_1d, condition2_1d],
[condition1_2d, condition2_2d],
permutation_cluster_test,
spatio_temporal_cluster_test)]:
out = func(X1d, **args)
adjacency = grid_to_graph(1, n_pts)
out_adjacency = func(X1d, adjacency=adjacency, **args)
assert_array_equal(out[0], out_adjacency[0])
for a, b in zip(out_adjacency[1], out[1]):
assert_array_equal(out[0][a], out[0][b])
assert np.all(a[b])
# test spatio-temporal w/o time adjacency (repeat spatial pattern)
adjacency_2 = sparse.coo_matrix(
linalg.block_diag(adjacency.asfptype().todense(),
adjacency.asfptype().todense()))
# nesting here is time then space:
adjacency_2a = combine_adjacency(np.eye(2), adjacency)
assert_array_equal(adjacency_2.toarray().astype(bool),
adjacency_2a.toarray().astype(bool))
if isinstance(X1d, list):
X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]
else:
X1d_2 = np.concatenate((X1d, X1d), axis=1)
out_adjacency_2 = func(X1d_2, adjacency=adjacency_2, **args)
# make sure we were operating on the same values
split = len(out[0])
assert_array_equal(out[0], out_adjacency_2[0][:split])
assert_array_equal(out[0], out_adjacency_2[0][split:])
# make sure we really got 2x the number of original clusters
n_clust_orig = len(out[1])
assert len(out_adjacency_2[1]) == 2 * n_clust_orig
# Make sure that we got the old ones back
data_1 = {np.sum(out[0][b[:n_pts]]) for b in out[1]}
data_2 = {np.sum(out_adjacency_2[0][a]) for a in
out_adjacency_2[1][:]}
assert len(data_1.intersection(data_2)) == len(data_1)
# now use the other algorithm
if isinstance(X1d, list):
X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]
else:
X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))
out_adjacency_3 = spatio_temporal_func(
X1d_3, n_permutations=50, adjacency=adjacency,
max_step=0, threshold=1.67, check_disjoint=True)
# make sure we were operating on the same values
split = len(out[0])
assert_array_equal(out[0], out_adjacency_3[0][0])
assert_array_equal(out[0], out_adjacency_3[0][1])
# make sure we really got 2x the number of original clusters
assert len(out_adjacency_3[1]) == 2 * n_clust_orig
# Make sure that we got the old ones back
data_1 = {np.sum(out[0][b[:n_pts]]) for b in out[1]}
data_2 = {np.sum(out_adjacency_3[0][a[0], a[1]]) for a in
out_adjacency_3[1]}
assert len(data_1.intersection(data_2)) == len(data_1)
# test new versus old method
out_adjacency_4 = spatio_temporal_func(
X1d_3, n_permutations=50, adjacency=adjacency,
max_step=2, threshold=1.67)
out_adjacency_5 = spatio_temporal_func(
X1d_3, n_permutations=50, adjacency=adjacency,
max_step=1, threshold=1.67)
# clusters could be in a different order
sums_4 = [np.sum(out_adjacency_4[0][a])
for a in out_adjacency_4[1]]
sums_5 = [np.sum(out_adjacency_4[0][a])
for a in out_adjacency_5[1]]
sums_4 = np.sort(sums_4)
sums_5 = np.sort(sums_5)
assert_array_almost_equal(sums_4, sums_5)
if not _force_serial:
pytest.raises(ValueError, spatio_temporal_func, X1d_3,
n_permutations=1, adjacency=adjacency,
max_step=1, threshold=1.67, n_jobs=-1000)
# not enough TFCE params
with pytest.raises(KeyError, match='threshold, if dict, must have'):
spatio_temporal_func(
X1d_3, adjacency=adjacency, threshold=dict(me='hello'))
# too extreme a start threshold
with pytest.warns(None) as w:
spatio_temporal_func(X1d_3, adjacency=adjacency,
threshold=dict(start=10, step=1))
if not did_warn:
assert len(w) == 1
did_warn = True
with pytest.raises(ValueError, match='threshold.*<= 0 for tail == -1'):
spatio_temporal_func(
X1d_3, adjacency=adjacency, tail=-1,
threshold=dict(start=1, step=-1))
with pytest.warns(RuntimeWarning, match='threshold.* is more extreme'):
spatio_temporal_func(
X1d_3, adjacency=adjacency, tail=1,
threshold=dict(start=100, step=1))
bad_con = adjacency.todense()
with pytest.raises(ValueError, match='must be a SciPy sparse matrix'):
spatio_temporal_func(
X1d_3, n_permutations=50, adjacency=bad_con,
max_step=1, threshold=1.67)
bad_con = adjacency.tocsr()[:-1, :-1].tocoo()
with pytest.raises(ValueError, match='adjacency.*the correct size'):
spatio_temporal_func(
X1d_3, n_permutations=50, adjacency=bad_con,
max_step=1, threshold=1.67)
with pytest.raises(TypeError, match='must be a'):
spatio_temporal_func(
X1d_3, adjacency=adjacency, threshold=[])
with pytest.raises(ValueError, match='Invalid value for the \'tail\''):
with pytest.warns(None): # sometimes ignoring tail
spatio_temporal_func(
X1d_3, adjacency=adjacency, tail=2)
# make sure it actually found a significant point
out_adjacency_6 = spatio_temporal_func(
X1d_3, n_permutations=50, adjacency=adjacency, max_step=1,
threshold=dict(start=1, step=1))
assert np.min(out_adjacency_6[2]) < 0.05
with pytest.raises(ValueError, match='not compatible'):
with pytest.warns(RuntimeWarning, match='No clusters'):
spatio_temporal_func(
X1d_3, n_permutations=50, adjacency=adjacency,
threshold=1e-3, stat_fun=lambda *x: f_oneway(*x)[:-1],
buffer_size=None)
@pytest.mark.parametrize('threshold', [
0.1,
pytest.param(dict(start=0., step=0.5), id='TFCE'),
])
@pytest.mark.parametrize('kind', ('1samp', 'ind'))
def test_permutation_cluster_signs(threshold, kind):
"""Test cluster signs."""
# difference between two conditions for 3 subjects x 2 vertices x 2 times
X = np.array([[[-10, 5], [-2, -7]],
[[-4, 5], [-8, -0]],
[[-6, 3], [-4, -2]]], float)
want_signs = np.sign(np.mean(X, axis=0))
n_permutations = 1
if kind == '1samp':
func = permutation_cluster_1samp_test
stat_fun = ttest_1samp_no_p
use_X = X
else:
assert kind == 'ind'
func = permutation_cluster_test
stat_fun = ttest_ind_no_p
use_X = [X, np.random.RandomState(0).randn(*X.shape) * 0.1]
tobs, clu, clu_pvalues, _ = func(
use_X, n_permutations=n_permutations, threshold=threshold, tail=0,
stat_fun=stat_fun, out_type='mask')
clu_signs = np.zeros(X.shape[1:])
used = np.zeros(X.shape[1:])
assert len(clu) == len(clu_pvalues)
for c, p in zip(clu, clu_pvalues):
assert not used[c].any()
assert len(np.unique(np.sign(tobs[c]))) == 1
clu_signs[c] = np.sign(tobs[c])[0]
used[c] = True
assert used.all()
assert clu_signs.all()
assert_array_equal(np.sign(tobs), want_signs)
assert_array_equal(clu_signs, want_signs)
@requires_sklearn
def test_permutation_adjacency_equiv(numba_conditional):
"""Test cluster level permutations with and without adjacency."""
from sklearn.feature_extraction.image import grid_to_graph
rng = np.random.RandomState(0)
# subjects, time points, spatial points
n_time = 2
n_space = 4
X = rng.randn(6, n_time, n_space)
# add some significant points
X[:, :, 0:2] += 10 # span two time points and two spatial points
X[:, 1, 3] += 20 # span one time point
max_steps = [1, 1, 1, 2, 1]
# This will run full algorithm in two ways, then the ST-algorithm in 2 ways
# All of these should give the same results
adjs = [None,
grid_to_graph(n_time, n_space),
grid_to_graph(1, n_space),
grid_to_graph(1, n_space),
None]
stat_map = None
thresholds = [2, 2, 2, 2, dict(start=0.01, step=1.0)]
sig_counts = [2, 2, 2, 2, 5]
stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
cs = None
ps = None
for thresh, count, max_step, adj in zip(thresholds, sig_counts,
max_steps, adjs):
t, clusters, p, H0 = \
permutation_cluster_1samp_test(
X, threshold=thresh, adjacency=adj, n_jobs=2,
max_step=max_step, stat_fun=stat_fun, seed=0, out_type='mask')
# make sure our output datatype is correct
assert isinstance(clusters[0], np.ndarray)
assert clusters[0].dtype == bool
assert_array_equal(clusters[0].shape, X.shape[1:])
# make sure all comparisons were done; for TFCE, no perm
# should come up empty
inds = np.where(p < 0.05)[0]
assert_equal(len(inds), count)
assert_allclose(p[inds], 0.03125, atol=1e-6)
if isinstance(thresh, dict):
assert_equal(len(clusters), n_time * n_space)
assert np.all(H0 != 0)
continue
this_cs = [clusters[ii] for ii in inds]
this_ps = p[inds]
this_stat_map = np.zeros((n_time, n_space), dtype=bool)
for ci, c in enumerate(this_cs):
if isinstance(c, tuple):
this_c = np.zeros((n_time, n_space), bool)
for x, y in zip(c[0], c[1]):
this_stat_map[x, y] = True
this_c[x, y] = True
this_cs[ci] = this_c
c = this_c
this_stat_map[c] = True
if cs is None:
ps = this_ps
cs = this_cs
if stat_map is None:
stat_map = this_stat_map
assert_array_equal(ps, this_ps)
assert len(cs) == len(this_cs)
for c1, c2 in zip(cs, this_cs):
assert_array_equal(c1, c2)
assert_array_equal(stat_map, this_stat_map)
@requires_sklearn
def test_spatio_temporal_cluster_adjacency(numba_conditional):
"""Test spatio-temporal cluster permutations."""
from sklearn.feature_extraction.image import grid_to_graph
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
rng = np.random.RandomState(0)
noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10)
data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1])
noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10)
data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1])
adj = grid_to_graph(data1_2d.shape[-1], 1)
threshold = dict(start=4.0, step=2)
T_obs, clusters, p_values_adj, hist = \
spatio_temporal_cluster_test([data1_2d, data2_2d], adjacency=adj,
n_permutations=50, tail=1, seed=1,
threshold=threshold, buffer_size=None)
buffer_size = data1_2d.size // 10
T_obs, clusters, p_values_no_adj, hist = \
spatio_temporal_cluster_test([data1_2d, data2_2d],
n_permutations=50, tail=1, seed=1,
threshold=threshold, n_jobs=2,
buffer_size=buffer_size)
assert_equal(np.sum(p_values_adj < 0.05), np.sum(p_values_no_adj < 0.05))
# make sure results are the same without buffer_size
T_obs, clusters, p_values2, hist2 = \
spatio_temporal_cluster_test([data1_2d, data2_2d],
n_permutations=50, tail=1, seed=1,
threshold=threshold, n_jobs=2,
buffer_size=None)
assert_array_equal(p_values_no_adj, p_values2)
pytest.raises(ValueError, spatio_temporal_cluster_test,
[data1_2d, data2_2d], tail=1, threshold=-2.)
pytest.raises(ValueError, spatio_temporal_cluster_test,
[data1_2d, data2_2d], tail=-1, threshold=2.)
pytest.raises(ValueError, spatio_temporal_cluster_test,
[data1_2d, data2_2d], tail=0, threshold=-1)
def ttest_1samp(X):
"""Return T-values."""
return stats.ttest_1samp(X, 0)[0]
def test_summarize_clusters():
"""Test cluster summary stcs."""
clu = (np.random.random([1, 20484]),
[(np.array([0]), np.array([0, 2, 4]))],
np.array([0.02, 0.1]),
np.array([12, -14, 30]))
stc_sum = summarize_clusters_stc(clu)
assert stc_sum.data.shape[1] == 2
clu[2][0] = 0.3
pytest.raises(RuntimeError, summarize_clusters_stc, clu)
def test_permutation_test_H0(numba_conditional):
"""Test that H0 is populated properly during testing."""
rng = np.random.RandomState(0)
data = rng.rand(7, 10, 1) - 0.5
with pytest.warns(RuntimeWarning, match='No clusters found'):
t, clust, p, h0 = spatio_temporal_cluster_1samp_test(
data, threshold=100, n_permutations=1024, seed=rng)
assert_equal(len(h0), 0)
for n_permutations in (1024, 65, 64, 63):
t, clust, p, h0 = spatio_temporal_cluster_1samp_test(
data, threshold=0.1, n_permutations=n_permutations, seed=rng)
assert_equal(len(h0), min(n_permutations, 64))
assert isinstance(clust[0], tuple) # sets of indices
for tail, thresh in zip((-1, 0, 1), (-0.1, 0.1, 0.1)):
t, clust, p, h0 = spatio_temporal_cluster_1samp_test(
data, threshold=thresh, seed=rng, tail=tail, out_type='mask')
assert isinstance(clust[0], np.ndarray) # bool mask
# same as "128 if tail else 64"
assert_equal(len(h0), 2 ** (7 - (tail == 0))) # exact test
def test_tfce_thresholds(numba_conditional):
"""Test TFCE thresholds."""
rng = np.random.RandomState(0)
data = rng.randn(7, 10, 1) - 0.5
# if tail==-1, step must also be negative
with pytest.raises(ValueError, match='must be < 0 for tail == -1'):
permutation_cluster_1samp_test(
data, tail=-1, out_type='mask', threshold=dict(start=0, step=0.1))
# this works (smoke test)
permutation_cluster_1samp_test(data, tail=-1, out_type='mask',
threshold=dict(start=0, step=-0.1))
# thresholds must be monotonically increasing
with pytest.raises(ValueError, match='must be monotonically increasing'):
permutation_cluster_1samp_test(
data, tail=1, out_type='mask', threshold=dict(start=1, step=-0.5))
run_tests_if_main()
|
bsd-3-clause
|
djgroen/flee-release
|
outputanalysis/PlotErrorComparison.py
|
1
|
5425
|
#This script should be run with the main flee directory as the working directory.
import pandas as pd
import matplotlib
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
import numpy as np
import sys
import warnings
import analysis as a
import StoreDiagnostics as dd
import FormatPyplotFigures as fpf
import CalculateDiagnostics
warnings.filterwarnings("ignore")
def compare_numagents_camp(out_dir, datas, name, legend_loc=4):
"""
Advanced plotting function for validation of refugee registration numbers in camps.
"""
fig = fpf.prepare_figure(xlabel="Days elapsed")
labelssim = []
n=0
for data in datas:
y1 = data["%s sim" % name].as_matrix()
y2 = data["%s data" % name].as_matrix()
days = np.arange(len(y1))
#Plotting lines representing simulation results.
labelsim, = plt.plot(days,y1, linewidth=8, label="%s (%s)" % (names[n], name))
labelssim.append(labelsim)
n+=1
  # Add a label for the naive model if it is enabled.
plt.legend(handles=labelssim,loc=legend_loc,prop={'size':18})
fig.savefig("%s/%s-%s.png" % (out_dir, name, legend_loc))
# Rescaled values
plt.clf()
fig = fpf.prepare_figure(xlabel="Days elapsed")
plt.xlabel("Days elapsed")
plt.ylabel("Number of refugees")
labelssim = []
n=0
for data in datas:
y1 = data["%s sim" % name].as_matrix()
days = np.arange(len(y1))
simtot = data["refugees in camps (simulation)"].as_matrix().flatten()
untot = data["refugees in camps (UNHCR)"].as_matrix().flatten()
#print(y1,simtot,untot)
y1_rescaled = np.zeros(len(y1))
for i in range(0, len(y1_rescaled)):
# Only rescale if simtot > 0
if simtot[i] > 0:
y1_rescaled[i] = y1[i] * untot[i] / simtot[i]
print(y1_rescaled)
labelsim, = plt.plot(days, y1_rescaled, linewidth=8, label="%s (%s)" % (names[n], name))
labelssim.append(labelsim)
n += 1
#labeldata, = plt.plot(days, y1, linewidth=8, label="%s UNHCR data" % (name.title()))
plt.legend(handles=labelssim,loc=legend_loc,prop={'size':18})
fig.savefig("%s/%s-%s-rescaled.png" % (out_dir, name, legend_loc))
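#Illustrative sketch (not part of the original script): the per-day rescaling
#loop above can also be written with a small numpy helper. The helper and its
#name are additions for clarity only.
def rescale_simulated_counts(y_sim, simtot, untot):
  """Rescale simulated camp counts by the UNHCR/simulation totals ratio.

  Days with a simulated total of zero are left at zero, mirroring the loop
  in compare_numagents_camp above.
  """
  y_sim = np.asarray(y_sim, dtype=float)
  simtot = np.asarray(simtot, dtype=float)
  untot = np.asarray(untot, dtype=float)
  rescaled = np.zeros_like(y_sim)
  mask = simtot > 0
  rescaled[mask] = y_sim[mask] * untot[mask] / simtot[mask]
  return rescaled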
#Start of the script: parse the output directories, their labels and the plot output directory from the command line.
if __name__ == "__main__":
in_dirs = []
names = []
if len(sys.argv) == 1:
print("Usage: python3 <this_script> <outdir1> <label1> <outdir2> <label2> ... <outdirN> <labelN> <output directory for plot files>")
for i in range(1, len(sys.argv)-1):
if i%2 == 1:
in_dirs.append(sys.argv[i])
else:
names.append(sys.argv[i])
out_dir = sys.argv[-1]
matplotlib.style.use('ggplot')
#figsize=(15, 10)
refugee_data = []
print(in_dirs)
for d in in_dirs:
refugee_data.append(pd.read_csv("%s/out.csv" % (d), sep=',', encoding='latin1',index_col='Day'))
#Identifying location names for graphs
rd_cols = list(refugee_data[0].columns.values)
location_names = []
for i in rd_cols:
if " sim" in i:
if "numAgents" not in i:
location_names.append(' '.join(i.split()[:-1]))
plt.xlabel("Days elapsed")
# Calculate the best offset.
sim_refs = []
un_refs = []
raw_refs = []
for i in range(0, len(refugee_data)):
sim_refs.append(refugee_data[i].loc[:,["refugees in camps (simulation)"]].as_matrix().flatten())
un_refs.append(refugee_data[i].loc[:,["refugees in camps (UNHCR)"]].as_matrix().flatten())
raw_refs.append(refugee_data[i].loc[:,["raw UNHCR refugee count"]].as_matrix().flatten())
loc_errors = []
sim_errors = []
nmodel = False
#Plot the numagents comparison for each camp.
for i in location_names:
compare_numagents_camp(out_dir, refugee_data, i, legend_loc=4)
for i in range(0, len(refugee_data)):
loc_errors.append([])
for j in location_names:
loc_errors[i].append(CalculateDiagnostics.calculate_errors(out_dir, refugee_data[i], j, naieve_model=nmodel))
sim_errors.append(dd.SimulationErrors(loc_errors[i]))
matplotlib.rcParams.update({'font.size': 20})
plt.clf()
# ERROR PLOTS
#Size of plots/figures
fig = fpf.prepare_figure()
#Plotting and saving error (differences) graph
plt.ylabel("Averaged relative difference")
plt.xlabel("Days elapsed")
handle_list = []
for i in range(0, len(in_dirs)):
diffdata = (sim_errors[i].abs_diff(rescaled=False) / np.maximum(un_refs[i], np.ones(len(un_refs[i]))))
diffdata_rescaled = (sim_errors[i].abs_diff() / np.maximum(un_refs[i], np.ones(len(un_refs[i]))))
print(out_dir,": Averaged error normal: ", np.mean(diffdata), ", rescaled: ", np.mean(diffdata_rescaled),", len: ", len(diffdata))
plt.plot(np.arange(len(diffdata_rescaled)), diffdata_rescaled, linewidth=5, label="error %s" % names[i])
plt.legend(loc=1,prop={'size':14})
plt.savefig("%s/error-compare-runs.png" % out_dir)
plt.clf()
fig = fpf.prepare_figure()
#Plotting and saving error (differences) graph
plt.ylabel("Number of agents in camps (simulation)")
plt.xlabel("Days elapsed")
for i in range(0, len(in_dirs)):
#refugee_data[i].loc[:,["total refugees (simulation)","refugees in camps (simulation)","raw UNHCR refugee count","refugee_debt"]].plot(linewidth=5, label="refugees in camps (sim) %s" % names[i])
plt.plot(np.arange(len(un_refs[i])), refugee_data[i].loc[:,["refugees in camps (simulation)"]], linewidth=5, label=names[i])
plt.legend(loc=1,prop={'size':14})
plt.savefig("%s/numsim-compare-runs.png" % out_dir)
|
bsd-3-clause
|
mjudsp/Tsallis
|
benchmarks/bench_plot_svd.py
|
325
|
2899
|
"""Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat, infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
|
bsd-3-clause
|
wind-python/windpowerlib
|
windpowerlib/power_output.py
|
1
|
11334
|
"""
The ``power_output`` module contains functions to calculate the power output
of a wind turbine.
SPDX-FileCopyrightText: 2019 oemof developer group <[email protected]>
SPDX-License-Identifier: MIT
"""
import numpy as np
import pandas as pd
def power_coefficient_curve(
wind_speed,
power_coefficient_curve_wind_speeds,
power_coefficient_curve_values,
rotor_diameter,
density,
):
r"""
Calculates the turbine power output using a power coefficient curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is
'power_coefficient_curve'.
Parameters
----------
wind_speed : :pandas:`pandas.Series<series>` or numpy.array
Wind speed at hub height in m/s.
power_coefficient_curve_wind_speeds : :pandas:`pandas.Series<series>` or numpy.array
Wind speeds in m/s for which the power coefficients are provided in
`power_coefficient_curve_values`.
power_coefficient_curve_values : :pandas:`pandas.Series<series>` or numpy.array
Power coefficients corresponding to wind speeds in
`power_coefficient_curve_wind_speeds`.
rotor_diameter : float
Rotor diameter in m.
density : :pandas:`pandas.Series<series>` or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
:pandas:`pandas.Series<series>` or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_ [2]_:
.. math:: P=\frac{1}{8}\cdot\rho_{hub}\cdot d_{rotor}^{2}
\cdot\pi\cdot v_{wind}^{3}\cdot cp\left(v_{wind}\right)
with:
P: power [W], :math:`\rho`: density [kg/m³], d: diameter [m],
v: wind speed [m/s], cp: power coefficient
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power coefficient curve is
zero.
References
----------
.. [1] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, pages 35ff, 208
.. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 542
"""
power_coefficient_time_series = np.interp(
wind_speed,
power_coefficient_curve_wind_speeds,
power_coefficient_curve_values,
left=0,
right=0,
)
power_output = (
1
/ 8
* density
* rotor_diameter ** 2
* np.pi
* np.power(wind_speed, 3)
* power_coefficient_time_series
)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(
data=power_output,
index=wind_speed.index,
name="feedin_power_plant",
)
else:
power_output = np.array(power_output)
return power_output
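# --- Illustrative sketch (not part of the original module) -----------------
# A hypothetical call to power_coefficient_curve; the cp curve, wind speeds,
# densities and rotor diameter below are invented numbers for demonstration.
def _example_power_coefficient_curve():
    cp_wind_speeds = np.array([3.0, 6.0, 9.0, 12.0])  # m/s
    cp_values = np.array([0.30, 0.44, 0.40, 0.30])  # power coefficients
    wind_speed = np.array([4.0, 8.0, 11.0])  # m/s at hub height
    density = np.array([1.225, 1.20, 1.18])  # kg/m3 at hub height
    # Returns electrical power in W; wind speeds outside the cp curve give 0.
    return power_coefficient_curve(
        wind_speed, cp_wind_speeds, cp_values,
        rotor_diameter=80.0, density=density)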
def power_curve(
wind_speed,
power_curve_wind_speeds,
power_curve_values,
density=None,
density_correction=False,
):
r"""
Calculates the turbine power output using a power curve.
This function is carried out when the parameter `power_output_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'power_curve'. If
the parameter `density_correction` is True the density corrected power
curve (see :py:func:`~.power_curve_density_correction`) is used.
Parameters
----------
wind_speed : :pandas:`pandas.Series<series>` or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : :pandas:`pandas.Series<series>` or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
    power_curve_values : :pandas:`pandas.Series<series>` or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : :pandas:`pandas.Series<series>` or numpy.array
Density of air at hub height in kg/m³. This parameter is needed
if `density_correction` is True. Default: None.
density_correction : bool
If the parameter is True the density corrected power curve (see
:py:func:`~.power_curve_density_correction`) is used for the
calculation of the turbine power output. In this case `density`
cannot be None. Default: False.
Returns
-------
:pandas:`pandas.Series<series>` or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-------
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero.
"""
if density_correction is False:
power_output = np.interp(
wind_speed,
power_curve_wind_speeds,
power_curve_values,
left=0,
right=0,
)
# Power_output as pd.Series if wind_speed is pd.Series (else: np.array)
if isinstance(wind_speed, pd.Series):
power_output = pd.Series(
data=power_output,
index=wind_speed.index,
name="feedin_power_plant",
)
else:
power_output = np.array(power_output)
elif density_correction is True:
power_output = power_curve_density_correction(
wind_speed, power_curve_wind_speeds, power_curve_values, density
)
else:
raise TypeError(
"'{0}' is an invalid type. ".format(type(density_correction))
+ "`density_correction` must "
+ "be Boolean (True or False)."
)
return power_output
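# --- Illustrative sketch (not part of the original module) -----------------
# A hypothetical comparison of the plain and the density-corrected power
# curve output; the curve points, wind speeds and densities are invented.
def _example_power_curve():
    curve_wind_speeds = np.array([3.0, 6.0, 9.0, 12.0, 15.0])  # m/s
    curve_values = np.array([0.0, 0.5e6, 1.5e6, 2.0e6, 2.0e6])  # W
    wind_speed = np.array([5.0, 8.0, 14.0])  # m/s at hub height
    density = np.array([1.10, 1.15, 1.20])  # kg/m3 at hub height
    p_plain = power_curve(wind_speed, curve_wind_speeds, curve_values)
    p_corrected = power_curve(wind_speed, curve_wind_speeds, curve_values,
                              density=density, density_correction=True)
    return p_plain, p_corrected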
def power_curve_density_correction(
wind_speed, power_curve_wind_speeds, power_curve_values, density
):
r"""
Calculates the turbine power output using a density corrected power curve.
This function is carried out when the parameter `density_correction` of an
instance of the :class:`~.modelchain.ModelChain` class is True.
Parameters
----------
wind_speed : :pandas:`pandas.Series<series>` or numpy.array
Wind speed at hub height in m/s.
power_curve_wind_speeds : :pandas:`pandas.Series<series>` or numpy.array
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
power_curve_values : :pandas:`pandas.Series<series>` or numpy.array
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
density : :pandas:`pandas.Series<series>` or numpy.array
Density of air at hub height in kg/m³.
Returns
-------
:pandas:`pandas.Series<series>` or numpy.array
Electrical power output of the wind turbine in W.
Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used for the site specific power curve wind
speeds [1]_ [2]_ [3]_:
.. math:: v_{site}=v_{std}\cdot\left(\frac{\rho_0}
{\rho_{site}}\right)^{p(v)}
with:
.. math:: p=\begin{cases}
\frac{1}{3} & v_{std} \leq 7.5\text{ m/s}\\
\frac{1}{15}\cdot v_{std}-\frac{1}{6} & 7.5
\text{ m/s}<v_{std}<12.5\text{ m/s}\\
                  \frac{2}{3} & v_{std} \geq 12.5 \text{ m/s}
\end{cases},
v: wind speed [m/s], :math:`\rho`: density [kg/m³]
:math:`v_{std}` is the standard wind speed in the power curve
(:math:`v_{std}`, :math:`P_{std}`),
:math:`v_{site}` is the density corrected wind speed for the power curve
(:math:`v_{site}`, :math:`P_{std}`),
:math:`\rho_0` is the ambient density (1.225 kg/m³)
and :math:`\rho_{site}` the density at site conditions (and hub height).
It is assumed that the power output for wind speeds above the maximum
and below the minimum wind speed given in the power curve is zero.
References
----------
.. [1] Svenningsen, L.: "Power Curve Air Density Correction And Other
Power Curve Options in WindPRO". 1st edition, Aalborg,
EMD International A/S , 2010, p. 4
.. [2] Svenningsen, L.: "Proposal of an Improved Power Curve Correction".
EMD International A/S , 2010
.. [3] Biank, M.: "Methodology, Implementation and Validation of a
Variable Scale Simulation Model for Windpower based on the
Georeferenced Installation Register of Germany". Master's Thesis
at Reiner Lemoine Institute, 2014, p. 13
"""
if density is None:
raise TypeError(
"`density` is None. For the calculation with a "
+ "density corrected power curve density at hub "
+ "height is needed."
)
# Convert pd.Series to a numpy array to speed up the interpolation below.
if isinstance(wind_speed, pd.Series):
# save the indexes for later conversion to pd.Series
wind_speed_indexes = wind_speed.index
# change the wind speed Series to numpy array
wind_speed = wind_speed.values
        # Set the pandas Series flag to True
panda_series = True
else:
panda_series = False
power_output = _get_power_output(
wind_speed,
np.array(power_curve_wind_speeds),
np.array(density),
np.array(power_curve_values),
)
# Convert results to the data type of the input data
if panda_series:
power_output = pd.Series(
data=power_output,
index=wind_speed_indexes, # Use previously saved wind speed index
name="feedin_power_plant",
)
return power_output
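# --- Illustrative sketch (not part of the original module) -----------------
# A hand-sized check (added for illustration) of the exponent p(v_std) from
# the Notes section above: 1/3 up to 7.5 m/s, linear in between, and 2/3 from
# 12.5 m/s onwards. np.interp reproduces the piecewise definition directly.
def _example_density_correction_exponent():
    v_std = np.array([5.0, 7.5, 10.0, 12.5, 20.0])
    p = np.interp(v_std, [7.5, 12.5], [1 / 3, 2 / 3])
    # e.g. at 10 m/s the middle branch gives 1/15 * 10 - 1/6 = 0.5
    return p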
def _get_power_output(
wind_speed, power_curve_wind_speeds, density, power_curve_values
):
"""Get the power output at each timestep using only numpy to speed up performance
Parameters
----------
wind_speed : :numpy:`numpy.ndarray`
Wind speed at hub height in m/s.
power_curve_wind_speeds : :numpy:`numpy.ndarray`
Wind speeds in m/s for which the power curve values are provided in
`power_curve_values`.
density : :numpy:`numpy.ndarray`
Density of air at hub height in kg/m³.
power_curve_values : :numpy:`numpy.ndarray`
Power curve values corresponding to wind speeds in
`power_curve_wind_speeds`.
Returns
-------
:numpy:`numpy.array`
Electrical power output of the wind turbine in W.
"""
# Calculate the power curves for each timestep using vectors
    # NOTE: power_curves_per_ts.shape = [len(wind_speed), len(power_curve_wind_speeds)]
power_curves_per_ts = (
(1.225 / density).reshape(-1, 1)
** np.interp(power_curve_wind_speeds, [7.5, 12.5], [1 / 3, 2 / 3])
) * power_curve_wind_speeds
# Create the interpolation function
def interp_func(w_speed, p_curves):
return np.interp(
w_speed, p_curves, power_curve_values, left=0, right=0
)
# Calculate the power output by mapping the arrays to the interp function
power_output = np.array(
list(map(interp_func, wind_speed, power_curves_per_ts))
)
return power_output
|
mit
|
dunkhong/grr
|
colab/grr_colab/magics.py
|
2
|
15824
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""GRR Colab magics module.
The module contains implementation of **magic** commands that use GRR API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import shlex
from IPython.core import magic_arguments
import pandas as pd
from typing import Text
from grr_colab import magics_impl
_PATH_TYPE_CHOICES = [magics_impl.OS, magics_impl.TSK, magics_impl.REGISTRY]
def grr_set_no_flow_timeout(line):
"""Disables flow timeout (it means wait forever).
Args:
line: A string representing arguments passed to the magic command.
Returns:
Nothing.
"""
del line # Unused.
magics_impl.grr_set_no_flow_timeout_impl()
def grr_set_default_flow_timeout(line):
"""Sets flow timeout to default value (30 seconds).
Args:
line: A string representing arguments passed to the magic command.
Returns:
Nothing.
"""
del line # Unused.
magics_impl.grr_set_default_flow_timeout_impl()
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'timeout', help='Timeout in seconds', type=int, nargs='?', default=None)
def grr_set_flow_timeout(line):
"""Sets flow timeout.
Specifying 0 as timeout means not to wait.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Nothing.
"""
args = grr_set_flow_timeout.parser.parse_args(shlex.split(line))
magics_impl.grr_set_flow_timeout_impl(args.timeout)
def grr_list_artifacts(line):
"""Lists all registered GRR artifacts.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Sequence of artifact descriptors.
"""
del line # Unused.
return magics_impl.grr_list_artifacts_impl()
@magic_arguments.magic_arguments()
@magic_arguments.argument('-i', '--ip', help='IP address', type=str)
@magic_arguments.argument('-m', '--mac', help='MAC address', type=str)
@magic_arguments.argument('-h', '--host', help='Hostname', type=str)
@magic_arguments.argument('-u', '--user', help='Username', type=str)
@magic_arguments.argument('-v', '--version', help='Client version', type=str)
@magic_arguments.argument(
'-l', '--label', help='Client label', type=str, action='append')
def grr_search_clients(line):
"""Searches for clients with specified keywords.
Args:
line: A string representing arguments passed to the magic command.
Returns:
List of clients.
"""
args = grr_search_clients.parser.parse_args(shlex.split(line))
return magics_impl.grr_search_clients_impl(
ip=args.ip,
mac=args.mac,
host=args.host,
user=args.user,
version=args.version,
labels=args.label)
@magic_arguments.magic_arguments()
@magic_arguments.argument('-i', '--ip', help='IP address', type=str)
@magic_arguments.argument('-m', '--mac', help='MAC address', type=str)
@magic_arguments.argument('-h', '--host', help='Hostname', type=str)
@magic_arguments.argument('-u', '--user', help='Username', type=str)
@magic_arguments.argument('-v', '--version', help='Client version', type=str)
@magic_arguments.argument(
'-l', '--label', help='Client label', type=str, action='append')
def grr_search_online_clients(line):
"""Searches for online clients with specified keywords.
Args:
line: A string representing arguments passed to the magic command.
Returns:
List of online clients.
"""
args = grr_search_online_clients.parser.parse_args(shlex.split(line))
return magics_impl.grr_search_online_clients_impl(
ip=args.ip,
mac=args.mac,
host=args.host,
user=args.user,
version=args.version,
labels=args.label)
@magic_arguments.magic_arguments()
@magic_arguments.argument('-h', '--hostname', help='Hostname', type=str)
@magic_arguments.argument('-c', '--client', help='Client ID', type=str)
def grr_set_client(line):
"""Sets a new client for the current state.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Nothing.
"""
args = grr_set_client.parser.parse_args(shlex.split(line))
magics_impl.grr_set_client_impl(args.hostname, args.client)
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-r', '--reason', help='Reason for the approval', type=str)
@magic_arguments.argument(
'-a',
'--approvers',
help='Notified users who can approve the request',
type=str,
nargs='+')
@magic_arguments.argument(
'-w', '--wait', action='store_true', help='Wait until approval is granted')
def grr_request_approval(line):
"""Sends approval request to the selected client for the current user.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Nothing.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_request_approval.parser.parse_args(shlex.split(line))
magics_impl.grr_request_approval_impl(args.reason, args.approvers, args.wait)
def grr_id(line):
"""Returns ID of the selected client.
Args:
line: A string representing arguments passed to the magic command.
Returns:
String representing ID of a client.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
del line # Unused.
return magics_impl.grr_id_impl()
@magic_arguments.magic_arguments()
@magic_arguments.argument('path', help='Directory path', type=str)
def grr_cd(line):
"""Changes the current directory.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Nothing.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_cd.parser.parse_args(shlex.split(line))
magics_impl.grr_cd_impl(args.path)
def grr_pwd(line):
"""Returns absolute path to the current directory.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Absolute path to the current directory.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
del line # Unused.
return magics_impl.grr_pwd_impl()
@magic_arguments.magic_arguments()
@magic_arguments.argument('path', help='Directory path', type=str, nargs='?')
@magic_arguments.argument(
'-C',
'--cached',
action='store_true',
help='Use cached filesystem instead of making call to a client')
@magic_arguments.argument(
'-P',
'--path-type',
help='Path type',
type=str,
choices=_PATH_TYPE_CHOICES,
default=magics_impl.OS)
def grr_ls(line):
"""Lists files in the specified directory or the current directory.
Args:
line: A string representing arguments passed to the magic command.
Returns:
A sequence of stat entries.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_ls.parser.parse_args(shlex.split(line))
return magics_impl.grr_ls_impl(
path=args.path, cached=args.cached, path_type=args.path_type)
@magic_arguments.magic_arguments()
@magic_arguments.argument('path', help='File path', type=str)
@magic_arguments.argument(
'-P',
'--path-type',
help='Path type',
type=str,
choices=_PATH_TYPE_CHOICES,
default=magics_impl.OS)
def grr_stat(line):
"""Stats the file specified.
Accepts glob expressions as a file path.
Args:
line: A string representing arguments passed to the magic command.
Returns:
A sequence of stat entries.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_stat.parser.parse_args(shlex.split(line))
return magics_impl.grr_stat_impl(path=args.path, path_type=args.path_type)
@magic_arguments.magic_arguments()
@magic_arguments.argument('path', help='File path', type=str)
@magic_arguments.argument(
'-c', '--bytes', default=4096, help='Number of bytes to read', type=int)
@magic_arguments.argument(
'-o', '--offset', default=0, help='Number of bytes to skip', type=int)
@magic_arguments.argument(
'-C',
'--cached',
action='store_true',
help='Use cached filesystem instead of making call to a client')
@magic_arguments.argument(
'-P',
'--path-type',
help='Path type',
type=str,
choices=_PATH_TYPE_CHOICES,
default=magics_impl.OS)
def grr_head(line):
"""Reads the first bytes of a specified file.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Specified number of the first bytes of the file.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_head.parser.parse_args(shlex.split(line))
return magics_impl.grr_head_impl(
path=args.path,
bytes=args.bytes,
offset=args.offset,
cached=args.cached,
path_type=args.path_type)
@magic_arguments.magic_arguments()
@magic_arguments.argument('pattern', help='Pattern to search for', type=str)
@magic_arguments.argument('path', help='File path', type=str)
@magic_arguments.argument(
'-F',
'--fixed-strings',
action='store_true',
help='Interpret pattern as a fixed string (literal)')
@magic_arguments.argument(
'-P',
'--path-type',
help='Path type',
type=str,
choices=_PATH_TYPE_CHOICES,
default=magics_impl.OS)
@magic_arguments.argument(
'-X',
'--hex-string',
action='store_true',
help='Interpret pattern as a hex-encoded byte string')
def grr_grep(line):
"""Greps for a given content of a specified file.
Args:
line: A string representing arguments passed to the magic command.
Returns:
A list of buffer references to the matched content.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_grep.parser.parse_args(shlex.split(line))
return magics_impl.grr_grep_impl(
pattern=args.pattern,
path=args.path,
fixed_strings=args.fixed_strings,
path_type=args.path_type,
hex_string=args.hex_string)
@magic_arguments.magic_arguments()
@magic_arguments.argument('literal', help='Literal to search for', type=str)
@magic_arguments.argument('path', help='File path', type=str)
@magic_arguments.argument(
'-P',
'--path-type',
help='Path type',
type=str,
choices=_PATH_TYPE_CHOICES,
default=magics_impl.OS)
@magic_arguments.argument(
'-X',
'--hex-string',
action='store_true',
help='Interpret pattern as a hex-encoded byte string')
def grr_fgrep(line):
"""Greps for a given literal content of a specified file.
Is the same as running: %grr_grep -F
Args:
line: A string representing arguments passed to the magic command.
Returns:
A list of buffer references to the matched content.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_fgrep.parser.parse_args(shlex.split(line))
return magics_impl.grr_fgrep_impl(
literal=args.literal,
path=args.path,
path_type=args.path_type,
hex_string=args.hex_string)
def grr_interrogate(line):
"""Creates Interrogate flow for the chosen client.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Client summary including system and client info, interfaces, and users.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
del line # Unused.
return magics_impl.grr_interrogate_impl()
def grr_hostname(line):
"""Returns hostname of the selected client.
Args:
line: A string representing arguments passed to the magic command.
Returns:
String representing hostname of a client.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
del line # Unused.
return magics_impl.grr_hostname_impl()
def grr_ifconfig(line):
"""Lists network interfaces of the selected client.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Sequence of interfaces.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
del line # Unused.
return magics_impl.grr_ifconfig_impl()
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-m', '--machine', action='store_true', help='Get machine hardware name')
@magic_arguments.argument(
'-r',
'--kernel-release',
action='store_true',
help='Get kernel release string')
def grr_uname(line):
  """Returns certain system information.
Args:
line: A string representing arguments passed to the magic command.
Returns:
String representing some system information.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_uname.parser.parse_args(shlex.split(line))
return magics_impl.grr_uname_impl(args.machine, args.kernel_release)
def grr_ps(line):
"""Lists processes of the selected client.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Sequence of processes.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
del line # Unused.
return magics_impl.grr_ps_impl()
@magic_arguments.magic_arguments()
@magic_arguments.argument('sql', help='SQL statement', type=str)
def grr_osqueryi(line):
"""Runs given SQL statement on client osquery.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Osquery table.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_osqueryi.parser.parse_args(shlex.split(line))
return magics_impl.grr_osqueryi_impl(args.sql)
@magic_arguments.magic_arguments()
@magic_arguments.argument('artifact', help='Name of the artifact', type=str)
def grr_collect(line):
"""Collects specified artifact.
Args:
line: A string representing arguments passed to the magic command.
Returns:
Sequence of results that artifact collection yielded.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_collect.parser.parse_args(shlex.split(line))
return magics_impl.grr_collect_impl(args.artifact)
@magic_arguments.magic_arguments()
@magic_arguments.argument('signature', help='YARA rule to use', type=str)
@magic_arguments.argument('-r', '--regex', help='Process name regex', type=str)
@magic_arguments.argument(
'-p', '--pids', help='Pids of processes to scan', type=int, nargs='+')
def grr_yara(line):
"""Scans processes using provided YARA rule.
Args:
line: A string representing arguments passed to the magic command.
Returns:
A sequence of YARA matches.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_yara.parser.parse_args(shlex.split(line))
return magics_impl.grr_yara_impl(args.signature, args.pids, args.regex)
@magic_arguments.magic_arguments()
@magic_arguments.argument('path', help='File path', type=str)
@magic_arguments.argument(
'-C',
'--cached',
action='store_true',
help='Use cached filesystem instead of making call to a client')
@magic_arguments.argument(
'-P',
'--path-type',
help='Path type',
type=str,
choices=_PATH_TYPE_CHOICES,
default=magics_impl.OS)
def grr_wget(line):
"""Downloads a file and returns a link to it.
Args:
line: A string representing arguments passed to the magic command.
Returns:
A link to the file.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_wget.parser.parse_args(shlex.split(line))
return magics_impl.grr_wget_impl(
path=args.path, cached=args.cached, path_type=args.path_type)
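# --- Illustrative sketch (not part of the original module) -----------------
# The magics above all take the raw argument string of the magic command. A
# hypothetical direct call (outside IPython, assuming a configured GRR client
# and an approved selection; the hostname below is invented) could look like:
def _example_direct_calls():
  grr_set_client('--hostname some-host.example.com')  # hypothetical hostname
  grr_cd('/tmp')
  return grr_ls('-C')  # cached listing of the current directory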
|
apache-2.0
|
AWPorter/aima-python
|
submissions/Hawley/kmeans.py
|
13
|
1586
|
import numpy as np
import matplotlib.pyplot as plt
# import fileinput
N = 100 # number of observations / 'points'
K = 4 # number of categories / 'means'
P = 10 # plot interval
def distance(x1,y1,x2,y2): # pythagorean distance
return np.sqrt( (x2-x1)**2 + (y2-y1)**2)
# Fancy data structure: we group point coordinates by common index in separate arrays,
# i.e. the first point has coordinates (points_x[0], points_y[0])
points_x = np.random.rand(N) # points are random on [0,1]
points_y = np.random.rand(N)
colors = np.zeros(N, dtype=int) # cluster assignment ('color') of each point, updated in the loop below
means_x = np.random.rand(K) # initialize means w/ random numbers on [0,1]
means_y = np.random.rand(K)
fig = plt.figure()
iterations = 100
for i in range(iterations):
# loop over all points: figure out who belongs to which means (assign colors)
for j in range(N):
min_dist = 99999.9 # big number
for m in range(K): # loop over all means
dist = distance(points_x[j], points_y[j], means_x[m], means_y[m])
if (dist < min_dist): # then update the color
min_dist = dist
colors[j] = m
#re-evaluate means
for m in range(K):
inds = np.where( m == colors) # indices of everybody belonging to one mean
means_x[m] = np.mean(points_x[inds]) # take the mean of the x-values in the group
means_y[m] = np.mean(points_y[inds]) # take the mean of the y-values in the group
# Update the picture
if(not i % P):
plt.scatter(points_x, points_y, c=colors, s=50, alpha=0.7)
plt.show()
# print('Proceed', '?')
# proceed = fileinput.input()
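# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original submission): one k-means
# iteration written with numpy broadcasting instead of explicit point loops.
# The helper name is new; the arrays mirror those defined above.
def kmeans_step(points_x, points_y, means_x, means_y):
    # distance of every point to every mean: shape (N, K)
    dx = points_x[:, None] - means_x[None, :]
    dy = points_y[:, None] - means_y[None, :]
    dist = np.sqrt(dx ** 2 + dy ** 2)
    labels = np.argmin(dist, axis=1)  # nearest mean for each point
    new_means_x = means_x.copy()
    new_means_y = means_y.copy()
    for m in range(len(means_x)):
        members = labels == m
        if members.any():  # unlike the loop above, empty groups keep their old mean
            new_means_x[m] = points_x[members].mean()
            new_means_y[m] = points_y[members].mean()
    return labels, new_means_x, new_means_y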
|
mit
|
eulerreich/keras
|
examples/kaggle_otto_nn.py
|
70
|
3775
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
    - with smaller layers, larger layers
    - with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
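# Illustrative sketch (not part of the original example): a tiny call to
# make_submission with invented probabilities, ids and a stand-in encoder, to
# show the CSV layout it produces (header of class names, one row per id).
def _example_make_submission():
    class _FakeEncoder(object):
        classes_ = ['Class_1', 'Class_2']
    y_prob = np.array([[0.9, 0.1], [0.2, 0.8]])
    make_submission(y_prob, ids=['1', '2'], encoder=_FakeEncoder(),
                    fname='example-submission.csv')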
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
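# The docstring above suggests trying SGD with momentum and decay instead of Adam.
# A hedged, commented-out sketch for this old Keras API (hyperparameters are
# illustrative, not tuned values from the original):
# from keras.optimizers import SGD
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd)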
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
|
mit
|
PatrickOReilly/scikit-learn
|
sklearn/utils/tests/test_class_weight.py
|
50
|
13151
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
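# For reference, the "balanced" heuristic tested above weights each class c as
# n_samples / (n_classes * count(c)); for y = [2, 2, 2, 3, 3, 4] that gives
# 6 / (3 * [3, 2, 1]) = [0.667, 1.0, 2.0], which satisfies both assertions.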
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
    # When the user specifies class weights, compute_class_weight should just
    # return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
|
bsd-3-clause
|
g-weatherill/oq-risklib
|
openquake/commonlib/commands/plot_loss.py
|
1
|
2034
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2015, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import pickle
from openquake.commonlib import sap
def make_figure(output_key, losses, poes):
"""
Plot a loss curve
"""
# NB: matplotlib is imported inside, otherwise nosetest would fail in an
# installation without matplotlib
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.grid(True)
ax.set_ylim([0, 1])
ax.set_xlabel(output_key)
ax.set_ylabel('PoE')
ax.plot(losses, poes)
return plt
def plot_loss(risk_pik, output_key):
"""
Loss curves plotter. For the moment it is restricted to the
aggregate curves.
:param risk_pik: the pathname to a pickled file
    :param output_key: a unique string identifying the output to plot
"""
# read the data
    with open(risk_pik, 'rb') as f:  # pickle data must be read in binary mode
out = pickle.load(f)
if output_key not in out:
        print('key %s not found: available keys are %s' % (output_key, sorted(out)))
return
loss_curve = out[output_key]
plt = make_figure(output_key, loss_curve['losses'], loss_curve['poes'])
plt.show()
parser = sap.Parser(plot_loss)
parser.arg('risk_pik', '.pik file containing the result of a computation')
parser.arg('output_key', 'a unique string identifying the output to plot')
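# Direct-call sketch (hypothetical pickle path and output key, not part of the
# original module; in practice this command is dispatched via openquake's CLI):
# plot_loss('/tmp/risk_result.pik', 'agg_loss_curve')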
|
agpl-3.0
|
CtraliePubs/SOCGMM2016_SlidingWindowVideo
|
2SinesSlidingCommensurate/do2SinesCommensurateSliding.py
|
1
|
1758
|
import sys
sys.path.append("../")
sys.path.append("../S3DGLPy")
from SlidingWindow1D import *
from PCAGL import *
from sklearn.decomposition import PCA
import numpy as np
import scipy
# Explicit imports for modules used below (some may also be pulled in by the
# wildcard imports above, but importing them here makes the script self-contained)
import scipy.io as sio
import scipy.misc
import subprocess
import matplotlib.pyplot as plt
if __name__ == '__main__':
T = 60
N = 600
NPeriods = N/T
t = np.linspace(0, 2*np.pi*NPeriods, N)
x = np.cos(t) + np.cos(3*t)
plt.plot(x)
plt.title('cos(t) + cos(3t)')
plt.axis('off')
plt.show()
W = T
M = N-W+1
y = getSlidingWindowEmbedding(x, W)
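    # y stacks the length-W windows of x as rows, so (assuming the usual sliding
    # window convention) it has shape (M, W) = (N - W + 1, W), with y[i] ~ x[i:i+W]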
sio.savemat("2Sines.mat", {"X":y})
pca = PCA()
Y = pca.fit_transform(y)
Y = Y/np.max(np.abs(Y))
sio.savemat("Y.mat", {"Y":Y})
np.savetxt("Y.txt", Y, fmt='%g', delimiter=' ', newline='\n')
Y = Y[:, 0:3]
c = plt.get_cmap('jet')
C = c(np.array(np.round(np.linspace(0, 255, Y.shape[0])), dtype=np.int64))
C = C[:, 0:3]
    #Step 1: Output 3D PCA
angles = np.pi/2*np.ones((Y.shape[0], 2))
angles[:, 0] = np.linspace(0, np.pi/4, Y.shape[0])
angles[:, 1] = np.linspace(np.pi/2 - np.pi/4, np.pi/2, Y.shape[0])
doPCAGLPlot(Y, C, angles, "Points")
#Step 2: Output sliding window
output1DSliding_SignalAlone(x, N, W, "Signal")
#Step 3: Combine Sliding Window with 3D PCA in one set of plots
plt.figure(figsize=(12, 6))
for starti in range(M):
plt.clf()
f = scipy.misc.imread("Signal%i.png"%starti)
plt.subplot(121)
plt.imshow(f)
plt.axis('off')
plt.subplot(122)
P = scipy.misc.imread("Points%i.png"%starti)
plt.imshow(P)
plt.axis('off')
plt.savefig("Frames%i.png"%starti, dpi=150, bbox_inches = 'tight')
subprocess.call(["avconv", "-r", "60", "-i" , "Frames%d.png", "-r", "60", "-b", "30000k", "Frames.ogg"])
|
apache-2.0
|
jdugge/GridToTIN
|
terrain_model.py
|
1
|
6762
|
import scipy.spatial as ss
import scipy.interpolate as si
import scipy.optimize as so
import numpy as np
import rasterio
import triangle
import matplotlib.tri as mpltri
import matplotlib.pyplot as plt
from IPython.display import clear_output  # used for the progress display in full_improvement_map
def norm(a, order=2):
return np.linalg.norm(a.flatten(), ord=order)/a.size**(1/order)
class TerrainModel:
def __init__(self, dem):
with rasterio.drivers():
with rasterio.open(dem) as src:
raw_data = src.read()
self.dem = np.array(raw_data.squeeze(), dtype=float)
self.xx, self.yy = np.indices(self.dem.shape)
self.points = np.array([[ 0, 0],
[self.dem.shape[1]-1, 0],
[self.dem.shape[1]-1, self.dem.shape[0]-1],
[ 0, self.dem.shape[0]-1]])
self.point_elevations = self.dem_elevation(*self.points.T)
self.triangulation_dirty = True
self._triangulation = self.triangulation()
self.interpolation_map_dirty = True
self._interpolation_map = self.interpolation_map()
self.improvement_map = np.zeros_like(self.dem)
self.improvement_map[:] = np.nan
def dem_elevation(self, x, y):
return self.dem[y, x]
def insert_point(self, x, y):
self.points = np.append(self.points, [[x,y]], axis=0)
self.point_elevations = np.append(self.point_elevations,
[self.dem_elevation(x, y)], axis=0)
self.triangulation_dirty = True
self.interpolation_map_dirty = True
def triangulation(self, recalculate=False):
if recalculate or self.triangulation_dirty:
self._triangulation = triangle.delaunay(self.points)
self._triangulation = mpltri.Triangulation(*self.points.T,
triangles=self._triangulation)
self.triangulation_dirty = False
return self._triangulation
def interpolation_map(self, recalculate=False):
if recalculate or self.interpolation_map_dirty:
interpolator = mpltri.LinearTriInterpolator(self.triangulation(),
self.point_elevations)
self._interpolation_map = interpolator(self.yy, self.xx)
self.interpolation_map_dirty = False
return self._interpolation_map
def error_map(self):
return self.interpolation_map() - self.dem
def plot_triangulation(self):
error_map = self.error_map()
max_error = np.max(np.abs(error_map))
min_elevation = np.min(self.dem)
max_elevation = np.max(self.dem)
fig, ax = plt.subplots(1,3, figsize=(15, 6))
ax[0].imshow(self.dem,
origin='top',
cmap='viridis',
vmin=min_elevation,
vmax=max_elevation)
ax[0].triplot(self.points[:,0],
self.points[:,1],
self.triangulation().triangles,
color='red', linewidth=2)
ax[0].margins(0)
ax[1].imshow(self.interpolation_map(),
origin='top',
cmap='viridis',
vmin=min_elevation,
vmax=max_elevation)
ax[1].triplot(self.points[:,0],
self.points[:,1],
self.triangulation().triangles,
color='red', linewidth=2)
ax[1].margins(0)
ax[2].imshow(error_map, origin='top',
cmap='RdBu',
vmin=-max_error,
vmax=max_error)
ax[2].triplot(self.points[:,0],
self.points[:,1],
self.triangulation().triangles,
color='red', linewidth=2)
ax[2].margins(0)
def test_point(self, p, error=None, interpolation=None, error_norm_order=2, update_improvement_map=False):
# Calculate old error
error_old = norm(self.interpolation_map() - self.dem, error_norm_order)
# Append the new coordinates
p = np.round(p).astype(int)
points = np.vstack([self.points, [p]])
values = np.append(self.point_elevations, self.dem_elevation(p[0], p[1]))
# Retriangulate
tri_new = triangle.delaunay(points)
tri_new = mpltri.Triangulation(*points.T, triangles=tri_new)
# Reinterpolate
interpolator = mpltri.LinearTriInterpolator(tri_new, values)
interpolation_new = interpolator(self.yy, self.xx)
# Calculate new error
error_new = norm(interpolation_new - self.dem, error_norm_order)
improvement = error_new - error_old
if update_improvement_map:
self.improvement_map[p[1], p[0]] = improvement
return improvement
def point_with_greatest_improvement(self, error_norm_order):
self.improvement_map[:] = np.nan
if error_norm_order == np.inf:
opt = so.brute(self.test_point,
[(0,self.dem.shape[1]-1),
(0,self.dem.shape[0]-1)],
Ns=25, args = (None, None, np.inf, True))
x_new, y_new = np.round(opt).astype(int)
improvement = np.nanmin(self.improvement_map)
else:
opt = so.differential_evolution(self.test_point,
args = [None, None, error_norm_order, True],
bounds = [(0,self.dem.shape[1]-1),
(0,self.dem.shape[0]-1)],
popsize=20, tol=0.0001)
x_new, y_new = np.round(opt.x).astype(int)
improvement = opt.fun
return (x_new, y_new), improvement
def point_with_greatest_error(self):
y, x = np.unravel_index(np.argmax(np.abs(self.error_map().flatten())), self.dem.shape)
error = self.error_map()[y,x]
return (x, y), error
def full_improvement_map(self, error_norm_order=2):
yy, xx = np.indices(self.dem.shape)
self.improvement_map[:] = np.nan
numpoints = len(self.dem.flatten())
for i, (x, y) in enumerate(zip(xx.flatten(), yy.flatten())):
percent_done = round((i+1)/numpoints*100, 1)
print('{:>5}'.format(percent_done), "%: Testing point:", (x,y), end="")
clear_output(wait=True)
            self.test_point([x, y], error_norm_order=error_norm_order, update_improvement_map=True)
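# Minimal usage sketch (hypothetical DEM path; kept commented out so importing
# this module stays side-effect free):
# if __name__ == '__main__':
#     tm = TerrainModel('dem.tif')
#     for _ in range(20):                       # greedily refine with 20 points
#         (x, y), _err = tm.point_with_greatest_error()
#         tm.insert_point(x, y)
#     tm.plot_triangulation()
#     plt.show()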
|
mit
|
simpeg/simpeg
|
examples/14-utils/plot_surface2ind_topo.py
|
1
|
2638
|
"""
Utils: surface2ind_topo
=======================
Here we show how to use :code:`Utils.surface2ind_topo` to identify
cells below a topographic surface and compare the different options
"""
import numpy as np
from SimPEG import Mesh
from SimPEG import Utils
from SimPEG.Utils import surface2ind_topo, mkvc
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
def run(plotIt=True, nx=5, ny=5):
# 2D mesh
mesh = Mesh.TensorMesh([nx, ny], x0='CC')
xtopo = mesh.vectorNx
# define a topographic surface
topo = 0.4*np.sin(xtopo*5)
# make it an array
Topo = np.hstack([Utils.mkvc(xtopo, 2), Utils.mkvc(topo, 2)])
# Compare the different options
indtopoCC_near = surface2ind_topo(mesh, Topo, gridLoc='CC', method='nearest')
indtopoN_near = surface2ind_topo(mesh, Topo, gridLoc='N', method='nearest')
indtopoCC_linear = surface2ind_topo(mesh, Topo, gridLoc='CC', method='linear')
indtopoN_linear = surface2ind_topo(mesh, Topo, gridLoc='N', method='linear')
indtopoCC_cubic = surface2ind_topo(mesh, Topo, gridLoc='CC', method='cubic')
indtopoN_cubic = surface2ind_topo(mesh, Topo, gridLoc='N', method='cubic')
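    # As used below, each call returns a boolean array over the mesh cells marking
    # which cells lie below the interpolated topography (evaluated at cell centers
    # for gridLoc='CC' and at the cell nodes for gridLoc='N')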
if plotIt:
fig, ax = plt.subplots(2, 3, figsize=(9, 6))
ax = mkvc(ax)
xinterpolate = np.linspace(mesh.gridN[:, 0].min(), mesh.gridN[:, 0].max(),100)
listindex = [indtopoCC_near,indtopoN_near,indtopoCC_linear,indtopoN_linear,indtopoCC_cubic,indtopoN_cubic]
listmethod = ['nearest','nearest', 'linear', 'linear', 'cubic', 'cubic']
for i in range(6):
mesh.plotGrid(ax=ax[i], nodes=True, centers=True)
mesh.plotImage(listindex[i], ax=ax[i], pcolorOpts = {"alpha":0.5, "cmap":plt.cm.gray})
ax[i].scatter(Topo[:,0], Topo[:,1], color = 'black', marker = 'o',s = 50)
ax[i].plot(
xinterpolate,
interp1d(Topo[:, 0], Topo[:, 1], kind=listmethod[i])(xinterpolate),
'--k',
linewidth=3
)
ax[i].xaxis.set_ticklabels([])
ax[i].yaxis.set_ticklabels([])
ax[i].set_aspect('equal')
ax[i].set_xlabel('')
ax[i].set_ylabel('')
ax[0].set_xlabel('Nearest Interpolation', fontsize=16)
ax[2].set_xlabel('Linear Interpolation', fontsize=16)
ax[4].set_xlabel('Cubic Interpolation', fontsize=16)
ax[0].set_ylabel('Cells Center \n based selection', fontsize=16)
ax[1].set_ylabel('Nodes \n based selection', fontsize=16)
plt.tight_layout()
if __name__ == '__main__':
run()
plt.show()
|
mit
|
xavierwu/scikit-learn
|
examples/linear_model/plot_lasso_and_elasticnet.py
|
249
|
1982
|
"""
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
|
bsd-3-clause
|
HeraclesHX/scikit-learn
|
sklearn/utils/tests/test_murmurhash.py
|
261
|
2836
|
# Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
        previous_hashes.add(h)  # record the hash so later iterations actually check for collisions
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
|
bsd-3-clause
|
bitemyapp/ggplot
|
ggplot/geoms/geom_dotplot.py
|
12
|
3168
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_dotplot(geom):
DEFAULT_AES = {'alpha': None, 'color': None, 'fill': '#333333',
'weight': None, 'y': None}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'bin', 'position': 'stack'}
_extra_requires = {'y', 'width'}
_aes_renames = {'size': 'linewidth', 'fill': 'color', 'color': 'edgecolor'}
# NOTE: Currently, geom_dotplot does not support mapping
# to alpha TODO: raise exception
_units = {'alpha'}
def _sort_list_types_by_x(self, pinfo):
"""
Sort the lists in pinfo according to pinfo['x']
"""
# Remove list types from pinfo
_d = {}
for k in list(pinfo.keys()):
if not is_string(pinfo[k]) and cbook.iterable(pinfo[k]):
_d[k] = pinfo.pop(k)
# Sort numerically if all items can be cast
try:
x = list(map(np.float, _d['x']))
except ValueError:
x = _d['x']
idx = np.argsort(x)
# Put sorted lists back in pinfo
for key in _d:
pinfo[key] = [_d[key][i] for i in idx]
return pinfo
def _plot_unit(self, pinfo, ax):
categorical = is_categorical(pinfo['x'])
# If x is not numeric, the bins are sorted acc. to x
# so the list type aesthetics must be sorted too
if categorical:
pinfo = self._sort_list_types_by_x(pinfo)
pinfo.pop('weight')
x = pinfo.pop('x')
width = np.array(pinfo.pop('width'))
heights = pinfo.pop('y')
labels = x
# layout and spacing
#
        # matplotlib needs the left of each bin and its width
# if x has numeric values then:
# - left = x - width/2
# otherwise x is categorical:
        # - left = cumulative width of previous bins starting
# at zero for the first bin
#
# then add a uniform gap between each bin
# - the gap is a fraction of the width of the first bin
# and only applies when x is categorical
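        # e.g. three categorical bins of width 1 give _breaks = [0, 1, 1, 1],
        # left = cumsum(_breaks[:-1]) = [0, 1, 2], and with the 0.2 gap plus a
        # 0.105*width separation the final lefts are [0.2, 1.305, 2.41]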
_left_gap = 0
_spacing_factor = 0 # of the bin width
if not categorical:
left = np.array([x[i]-width[i]/2 for i in range(len(x))])
else:
_left_gap = 0.2
_spacing_factor = 0.105 # of the bin width
_breaks = np.append([0], width)
left = np.cumsum(_breaks[:-1])
_sep = width[0] * _spacing_factor
left = left + _left_gap + [_sep * i for i in range(len(left))]
step = np.max(heights) / 24.
for (_x, _y) in zip(left + width, heights):
yvals = np.arange(0, _y, step) + step/2
pinfo['s'] = 240
ax.scatter(np.repeat(_x, len(yvals)), yvals, **pinfo)
ax.autoscale()
if categorical:
ax.set_xticks(left+width)
ax.set_xticklabels(x)
|
bsd-2-clause
|
dsavransky/EXOSIMS
|
EXOSIMS/util/plotKeepoutMap.py
|
1
|
16957
|
# -*- coding: utf-8 -*-
"""
Plotting utility for the production of Keepout Map Related Products
Generalized from makeKeepoutMap.py (authored by Gabriel Soto)
Written by: Dean Keithly
Written on: 3/6/2019
"""
import os
from EXOSIMS.util.vprint import vprint
import random as myRand
import sys, os.path, EXOSIMS, EXOSIMS.MissionSim
try:
import cPickle as pickle
except:
import pickle
import os
import numpy as np
from numpy import nan
if not 'DISPLAY' in os.environ.keys(): #Check environment for keys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
else:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import colors
import argparse
import json
from EXOSIMS.util.read_ipcluster_ensemble import gen_summary
from EXOSIMS.util.read_ipcluster_ensemble import read_all
from numpy import linspace
from matplotlib.ticker import NullFormatter, MaxNLocator
from matplotlib import ticker
import astropy.units as u
import matplotlib.patheffects as PathEffects
import datetime
import re
from astropy.time import Time
import time
class plotKeepoutMap(object):
""" This plotting utility plots anything pertaining to keepout maps
"""
_modtype = 'util'
def __init__(self, args=None):
vprint(args)
vprint('initialize plotKeepoutMap done')
pass
def singleRunPostProcessing(self, PPoutpath=None, folder=None):
"""This is called by runPostProcessing
Args:
PPoutpath (string) - output path to place data in
folder (string) - full filepath to folder containing runs
"""
if not os.path.exists(folder):#Folder must exist
raise ValueError('%s not found'%folder)
if not os.path.exists(PPoutpath):#PPoutpath must exist
raise ValueError('%s not found'%PPoutpath)
outspecfile = os.path.join(folder,'outspec.json')
if not os.path.exists(outspecfile):#outspec file not found
raise ValueError('%s not found'%outspecfile)
#Create Mission Object To Extract Some Plotting Limits
sim = EXOSIMS.MissionSim.MissionSim(outspecfile, nopar=True)
obs = sim.Observatory
TL = sim.TargetList #target list
missionStart = sim.TimeKeeping.missionStart #Time Object
TK = sim.TimeKeeping
##########################################################################################
        #### Generate Keepout map: array of Target List star indices
N = np.arange(0,TL.nStars)
#Generate Keepout over Time
koEvaltimes = np.arange(TK.missionStart.value, TK.missionStart.value+TK.missionLife.to('day').value,1) #2year mission, I guess
koEvaltimes = Time(koEvaltimes,format='mjd')
#initial arrays
koGood = np.zeros([TL.nStars,len(koEvaltimes)]) #keeps track of when a star is in keepout or not (True = observable)
culprit = np.zeros([TL.nStars,len(koEvaltimes),11]) #keeps track of whose keepout the star is under
#calculating keepout angles for all stars
tic = time.clock()
for n in np.arange(TL.nStars):
koGood[n,:],r_body, r_targ, culprit[n,:,:], koangles = obs.keepout(TL,n,koEvaltimes,True)
toc = time.clock()
print('This took %s seconds' %(str(toc-tic)))
# Define Colors
#green:#00802b
#purplish:7F7FFF
#crap taupe:DEDE7F
#GOLD: FFD500
#GREY:747783
cmap = colors.ListedColormap(['white','#FFD500', 'blue', '#747783','red','m','red']) #colors for used to indicate a culprit behind keepout
bounds=[0,1,2,3,4,5,6,7]
norm = colors.BoundaryNorm(bounds, cmap.N)
#creating an array of colors based on culprit
koColor = np.zeros([TL.nStars,len(koEvaltimes)])
for t in np.arange(0,len(koEvaltimes)):
sunFault = [bool(culprit[x,t,0]) for x in np.arange(TL.nStars)]
earthFault = [bool(culprit[x,t,2]) for x in np.arange(TL.nStars)]
moonFault = [bool(culprit[x,t,1]) for x in np.arange(TL.nStars)]
mercFault = [bool(culprit[x,t,3]) for x in np.arange(TL.nStars)]
venFault = [bool(culprit[x,t,4]) for x in np.arange(TL.nStars)]
marsFault = [bool(culprit[x,t,5]) for x in np.arange(TL.nStars)]
koColor[marsFault ,t] = 4#red
koColor[venFault ,t] = 5#m
koColor[mercFault ,t] = 6#red
koColor[moonFault ,t] = 3#747783
koColor[earthFault,t] = 2#blue
koColor[sunFault ,t] = 1#FFD500
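            # note that assignment order matters here: later lines overwrite earlier
            # ones, so the Sun takes precedence when several bodies cause keepout at once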
#plotting colors on a 2d map
plt.close(546832183)
fig = plt.figure(546832183, figsize=(10,5))
fig.subplots_adjust(bottom=0.15)
gs = gridspec.GridSpec(1,2, width_ratios=[6,1], height_ratios=[1])
gs.update(wspace=0.06, hspace=0.06) # set the spacing between axes.
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax2 = plt.subplot(gs[1])
ax = plt.subplot(gs[0])
#I'm plotting a subset of koColor here that looked good to me for half the mission time (1yr)
if koColor.shape[0] > 100: #Determine maximum number of stars to track keepouts for
NUMBER_Y = 100
else:
NUMBER_Y = koColor.shape[0]
sInds = np.linspace(0, koColor.shape[0], num=NUMBER_Y, endpoint=False, dtype=int).tolist()
img = plt.imshow(koColor[sInds,0:int(np.floor(len(koEvaltimes)))], aspect='auto',#4,
cmap=cmap,interpolation='none',origin='lower',norm=norm)
ax.set_xlabel('Mission Elapsed Time (d), Mission Start %s UTC MJD' %(str(TK.missionStart.value)), weight='bold')
ax.set_ylabel(r'Target Star, $i$', weight='bold')
ax.set_xlim(left=0.,right=np.max(koEvaltimes).value-TK.missionStart.value)
ax.set_ylim(bottom=0.,top=NUMBER_Y)
outline=PathEffects.withStroke(linewidth=5, foreground='black')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[0],label='Visible',path_effects=[outline])
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[1],label=ur"$\u2609$")
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[2],label=ur'$\oplus$')##\u2641$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[3],label=ur'$\u263D$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[4],label=ur'$\u2642\u263F$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[5],label=ur'$\u2640$')
#plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[6],label=ur'$\u263F$') duplicate color so appended above
leg = plt.legend(framealpha=1.0)
# get the lines and texts inside legend box
leg_lines = leg.get_lines()
leg_texts = leg.get_texts()
# bulk-set the properties of all lines and texts
plt.setp(leg_lines, linewidth=4)
plt.setp(leg_texts, fontsize='x-large')
nullfmt = NullFormatter()
ax2.yaxis.set_major_formatter(nullfmt)
#Plot horizontal histogram
tTotal = np.max(koEvaltimes).value-TK.missionStart.value # Calculate total time width of koMap
tVis = list() # stores time visible of each star
for i in np.arange(len(sInds)):#iterate over all stars and append amount of time each star is visible
tVis.append(len(np.where(koColor[sInds[i],:]==0)[0]))
width = np.zeros(len(tVis))+1.
ax2.barh(np.arange(len(sInds))+0.5,np.asarray(tVis,dtype=float)/tTotal*100., width, align='center', color='black')
ax2.set_xlim(left=0.,right=100.)
ax2.set_ylim(bottom=0.,top=NUMBER_Y)
ax2.set_xlabel('% Time\n Visible', weight='bold')
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMap_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot a koMap scaled down to 1 year
if TK.missionLife.to('year').value > 1.0:# years
plt.close(56846512161)
fig = plt.figure(56846512161)
fig.subplots_adjust(bottom=0.15)
gs = gridspec.GridSpec(1,2, width_ratios=[6,1], height_ratios=[1])
gs.update(wspace=0.06, hspace=0.06) # set the spacing between axes.
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax2 = plt.subplot(gs[1])
ax = plt.subplot(gs[0])
#I'm plotting a subset of koColor here that looked good to me for half the mission time (1yr)
if koColor.shape[0] > 100: #Determine maximum number of stars to track keepouts for
NUMBER_Y = 100
else:
NUMBER_Y = koColor.shape[0]
sInds = np.linspace(0, koColor.shape[0], num=NUMBER_Y, endpoint=False, dtype=int).tolist()
img = plt.imshow(koColor[sInds,0:365], aspect='auto',#4,
cmap=cmap,interpolation='none',origin='lower',norm=norm)
ax.set_xlabel('Mission Elapsed Time (d)\nMission Start %s UTC MJD' %(str(TK.missionStart.value)), weight='bold')
ax.set_ylabel(r'Target Star, $i$', weight='bold')
ax.set_xlim(left=0.,right=365.)
ax.set_ylim(bottom=0.,top=NUMBER_Y)
outline=PathEffects.withStroke(linewidth=5, foreground='black')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[0],label='Visible',path_effects=[outline])
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[1],label=ur'$\u2609$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[2],label=ur'$\oplus$')#\u2641$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[3],label=ur'$\u263D$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[4],label=ur'$\u2642\u263F$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[5],label=ur'$\u2640$')
#plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[6],label=ur'$\u263F$')
leg = plt.legend(framealpha=1.0)
# get the lines and texts inside legend box
leg_lines = leg.get_lines()
leg_texts = leg.get_texts()
# bulk-set the properties of all lines and texts
plt.setp(leg_lines, linewidth=4)
plt.setp(leg_texts, fontsize='x-large')
nullfmt = NullFormatter()
ax2.yaxis.set_major_formatter(nullfmt)
#Plot horizontal histogram
tTotal = np.max(koEvaltimes).value-TK.missionStart.value # Calculate total time width of koMap
tVis = list() # stores time visible of each star
for i in np.arange(len(sInds)):#iterate over all stars and append amount of time each star is visible
tVis.append(len(np.where(koColor[sInds[i],:]==0)[0]))
width = np.zeros(len(tVis))+1.
ax2.barh(np.arange(len(sInds))+0.5,np.asarray(tVis,dtype=float)/tTotal*100., width, align='center', color='black')
ax2.set_xlim(left=0.,right=100.)
ax2.set_ylim(bottom=0.,top=NUMBER_Y)
ax2.set_xlabel('% Time\n Visible', weight='bold')
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMapScaled_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot a Histogram of Percent Time Visible 10 bins
plt.close(65685621)
fig = plt.figure(65685621)
bins = np.linspace(start=0,stop=100,num=11)
plt.hist(np.asarray(tVis)/tTotal*100., bins=bins, color='black', alpha=1., histtype='bar', ec='black')
plt.ylabel('Target Count', weight='bold')
plt.xlabel('Time Visible (%)', weight='bold')
plt.xlim((0,100))
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMapHist10_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot as Histogram of Percent Time Visible Many Bins
plt.close(98735654)
fig = plt.figure(98735654)
bins = np.linspace(start=0,stop=np.round(np.max(tVis)/tTotal*100.),num=np.round(np.max(tVis)/tTotal*100.)+1)
plt.hist(np.asarray(tVis)/tTotal*100., bins=bins, color='black', alpha=1., histtype='bar', ec='black')
plt.ylabel('Target Count', weight='bold')
plt.xlabel('Time Visible (%)', weight='bold')
plt.xlim((0,np.ceil(np.max(tVis)/tTotal*100.)))
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMapHistDetail_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot Hist and CDF of % time ######################################
tVis2 = list() # stores time visible of each star
for i in np.arange(len(sInds)):#iterate over all stars and append amount of time each star is visible
tVis2.append(len(np.where(koColor[i,:]==0)[0]))
if tVis2[-1] > tTotal:
tVis2[-1] = tTotal
bins = np.linspace(start=0,stop=np.round(np.max(tVis2)/tTotal*100.),num=np.round(np.max(tVis2)/tTotal*100.)+1)
n, bins, patches = plt.figure(665465461286584).add_subplot(1,1,1).hist(np.asarray(tVis2)/tTotal*100., bins=bins)
plt.show(block=False)
plt.close(665465461286584) # doing this just to descroy above plot Replace with numpy.histogram in future
cdf = np.cumsum(n)#cumtrapz(n, bins[:-1], initial=0.)
cdf = cdf/np.max(cdf)
plt.close(23623)
fig2 = plt.figure(23623)
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax2 = fig2.add_subplot(1,1,1)
bins = np.linspace(start=0,stop=np.round(np.max(tVis2)/tTotal*100.),num=np.round(np.max(tVis2)/tTotal*100.)+1)
n2, bins2, patches2 = ax2.hist(np.asarray(tVis2)/tTotal*100.,zorder=8,color='black', bins=bins[1:])
ax2.set_xlabel('Percent Time Visible (%)', weight='bold')
ax3 = ax2.twinx()
ax3.plot(bins[:-1],cdf*100.,zorder=10, color='red')
#DELETEax2.spines['right'].set_color('red') # setting the right side axis to red
ax3.spines['right'].set_color('red') # setting the right side axis to red
ax3.xaxis.label.set_color('red')
ax3.tick_params(axis='y', colors='red')
ax2.set_ylabel('Target Count', weight='bold')
ax3.set_ylabel('CDF (%)', weight='bold', color='red')
ax2.set_xlim(left=0.,right=100.)
ax2.set_ylim(bottom=0.,top=1.1*np.max(n2))
ax3.set_ylim(bottom=0.,top=100.)
plt.show(block=False)
fname = 'koMapHIST_CDF_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
###########################################################
|
bsd-3-clause
|
ricket1978/ggplot
|
ggplot/utils/ggutils.py
|
13
|
5903
|
"""Helper methods for ggplot.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import json
import os
import sys
import six
# API-docs from ggplot2: GPL-2 licensed
def ggsave(filename = None, plot = None, device = None, format = None,
path = None, scale = 1, width = None, height = None, units = "in",
dpi = 300, limitsize=True, **kwargs):
"""Save a ggplot with sensible defaults
ggsave is a convenient function for saving a plot. It defaults to
saving the last plot that you displayed, and for a default size uses
the size of the current graphics device. It also guesses the type of
graphics device from the extension. This means the only argument you
need to supply is the filename.
Parameters
----------
filename : str or file
file name or file to write the plot to
plot : ggplot
plot to save, defaults to last plot displayed
format : str
image format to use, automatically extract from
file name extension
path : str
path to save plot to (if you just want to set path and
not filename)
scale : number
scaling factor
width : number
width (defaults to the width of current plotting window)
height : number
height (defaults to the height of current plotting window)
units : str
units for width and height when either one is explicitly
specified (in, cm, or mm)
dpi : number
dpi to use for raster graphics
    limitsize : bool
        when `True` (the default), ggsave will not save images
        larger than 25x25 inches (the limit enforced below), to prevent the
        common error of specifying dimensions in pixels.
kwargs : dict
additional arguments to pass to matplotlib `savefig()`
Returns
-------
None
Examples
--------
>>> from ggplot import *
>>> gg = ggplot(aes(x='wt',y='mpg',label='name'),data=mtcars) + geom_text()
>>> ggsave("filename.png", gg)
Notes
-----
Incompatibilities to ggplot2:
    - `format` can be used as an alternative to `device`
- ggsave will happily save matplotlib plots, if that was the last plot
"""
fig_kwargs = {}
fig_kwargs.update(kwargs)
# This is the case when we just use "ggsave(plot)"
if hasattr(filename, "draw"):
plot, filename = filename, plot
if plot is None:
figure = plt.gcf()
else:
if hasattr(plot, "draw"):
figure = plot.draw()
else:
raise Exception("plot is not a ggplot object")
if format and device:
raise Exception("Both 'format' and 'device' given: only use one")
# in the end the imageformat is in format
if device:
format = device
if format:
if not format in figure.canvas.get_supported_filetypes():
raise Exception("Unknown format: {0}".format(format))
fig_kwargs["format"] = format
if filename is None:
if plot:
# ggplot2 defaults to pdf
filename = str(plot.__hash__()) + "." +(format if format else "pdf")
else:
            # ggplot2 has a way to get to the last plot, but we currently don't
raise Exception("No filename given: please supply a filename")
if not isinstance(filename, six.string_types):
# so probably a file object
if format is None:
raise Exception("filename is not a string and no format given: please supply a format!")
if path:
filename = os.path.join(path, filename)
if units not in ["in", "cm", "mm"]:
raise Exception("units not 'in', 'cm', or 'mm'")
    # mm -> inch must divide by 25.4 (and the reverse multiplies), e.g. 100 mm is ~3.94 in
    to_inch = {"in": lambda x: x, "cm": lambda x: x / 2.54, "mm": lambda x: x / 25.4}
    from_inch = {"in": lambda x: x, "cm": lambda x: x * 2.54, "mm": lambda x: x * 25.4}
w, h = figure.get_size_inches()
issue_size = False
if width is None:
width = w
issue_size = True
else:
width = to_inch[units](width)
if height is None:
height = h
issue_size = True
else:
height = to_inch[units](height)
try:
scale = float(scale)
except:
raise Exception("Can't convert scale argument to a number: {0}".format(scale))
# ggplot2: if you specify a width *and* a scale, you get the width*scale image!
width = width * scale
height = height * scale
if issue_size:
msg = "Saving {0} x {1} {2} image.\n".format(from_inch[units](width), from_inch[units](height), units)
sys.stderr.write(msg)
if limitsize and (width > 25 or height > 25):
msg = "Dimensions exceed 25 inches (height and width are specified in inches/cm/mm, not pixels)." + \
" If you are sure you want these dimensions, use 'limitsize=False'."
raise Exception(msg)
fig_kwargs["dpi"] = dpi
#savefig(fname, dpi=None, facecolor='w', edgecolor='w',
# orientation='portrait', papertype=None, format=None,
# transparent=False, bbox_inches=None, pad_inches=0.1,
# frameon=None)
try:
figure.set_size_inches(width,height)
figure.savefig(filename, **fig_kwargs)
finally:
# restore the sizes
figure.set_size_inches(w,h)
# close figure, if it was drawn by ggsave
    if plot is not None:
plt.close(figure)
def add_ggplotrc_params(obj):
# ggplotrc defaults
if "HOME" in os.environ:
ggrc = os.path.join(os.environ["HOME"], ".ggplotrc")
try:
klass = obj.__class__.__name__
ggrc = json.load(open(ggrc, 'r'))
if klass in ggrc:
for k, v in ggrc[klass].items():
setattr(obj, k, v)
except:
pass
|
bsd-2-clause
|
wronk/mne-python
|
mne/preprocessing/tests/test_xdawn.py
|
2
|
5154
|
# Authors: Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import os.path as op
from nose.tools import (assert_equal, assert_raises)
from numpy.testing import assert_array_equal
from mne import (io, Epochs, read_events, pick_types,
compute_raw_covariance)
from mne.utils import requires_sklearn, run_tests_if_main
from mne.preprocessing.xdawn import Xdawn
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
tmin, tmax = -0.1, 0.2
event_id = dict(cond2=2, cond3=3)
def _get_data():
raw = io.read_raw_fif(raw_fname, add_eeg_ref=False, verbose=False,
preload=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False,
ecg=False, eog=False,
exclude='bads')[::8]
return raw, events, picks
def test_xdawn_init():
"""Test init of xdawn."""
# init xdawn with good parameters
Xdawn(n_components=2, correct_overlap='auto', signal_cov=None, reg=None)
# init xdawn with bad parameters
assert_raises(ValueError, Xdawn, correct_overlap=42)
def test_xdawn_fit():
"""Test Xdawn fit."""
# get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# =========== Basic Fit test =================
# test base xdawn
xd = Xdawn(n_components=2, correct_overlap='auto',
signal_cov=None, reg=None)
xd.fit(epochs)
    # with these parameters, the overlap correction must be False
    assert_equal(xd.correct_overlap, False)
    # with no overlap correction, this should give the averaged evoked
evoked = epochs['cond2'].average()
assert_array_equal(evoked.data, xd.evokeds_['cond2'].data)
# ========== with signal cov provided ====================
# provide covariance object
signal_cov = compute_raw_covariance(raw, picks=picks)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov, reg=None)
xd.fit(epochs)
# provide ndarray
signal_cov = np.eye(len(picks))
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov, reg=None)
xd.fit(epochs)
# provide ndarray of bad shape
signal_cov = np.eye(len(picks) - 1)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov, reg=None)
assert_raises(ValueError, xd.fit, epochs)
# provide another type
signal_cov = 42
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov, reg=None)
assert_raises(ValueError, xd.fit, epochs)
    # fitting with both baseline correction and overlap correction should throw
    # an error
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=(None, 0), verbose=False)
xd = Xdawn(n_components=2, correct_overlap=True)
assert_raises(ValueError, xd.fit, epochs)
def test_xdawn_apply_transform():
"""Test Xdawn apply and transform."""
# get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
n_components = 2
# Fit Xdawn
xd = Xdawn(n_components=n_components, correct_overlap='auto')
xd.fit(epochs)
# apply on raw
xd.apply(raw)
# apply on epochs
xd.apply(epochs)
# apply on evoked
xd.apply(epochs.average())
# apply on other thing should raise an error
assert_raises(ValueError, xd.apply, 42)
# transform on epochs
xd.transform(epochs)
# transform on ndarray
xd.transform(epochs._data)
    # transform on something else
assert_raises(ValueError, xd.transform, 42)
@requires_sklearn
def test_xdawn_regularization():
"""Test Xdawn with regularization."""
# get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# test xdawn with overlap correction
xd = Xdawn(n_components=2, correct_overlap=True,
signal_cov=None, reg=0.1)
xd.fit(epochs)
# ========== with cov regularization ====================
# ledoit-wolf
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(picks)), reg='ledoit_wolf')
xd.fit(epochs)
# oas
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(picks)), reg='oas')
xd.fit(epochs)
# with shrinkage
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(picks)), reg=0.1)
xd.fit(epochs)
# with bad shrinkage
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(picks)), reg=2)
assert_raises(ValueError, xd.fit, epochs)
run_tests_if_main()
|
bsd-3-clause
|
jni/ray
|
ray/viz.py
|
3
|
8590
|
from annotefinder import AnnoteFinder
from math import ceil
import numpy as np
import scipy
import evaluate
import morpho
import matplotlib
from scipy.ndimage import label
plt = matplotlib.pyplot
cm = plt.cm
from itertools import cycle
label=scipy.ndimage.measurements.label
center_of_mass=scipy.ndimage.measurements.center_of_mass
###########################
# VISUALIZATION FUNCTIONS #
###########################
def imshow_grey(im):
return plt.imshow(im, cmap=plt.cm.gray, interpolation='nearest')
def imshow_jet(im):
return plt.imshow(im, cmap=plt.cm.jet, interpolation='nearest')
def imshow_rand(im):
rcmap = matplotlib.colors.ListedColormap(np.concatenate(
        (np.zeros((1, 3)), np.random.rand(int(ceil(im.max())), 3))
))
return plt.imshow(im, cmap=rcmap, interpolation='nearest')
def draw_seg(seg, im):
    """Color each segment of seg with the mean color of its pixels in im."""
    out = np.zeros_like(im)
    labels = np.unique(seg)
    if (seg == 0).any():
        labels = labels[1:]
    for u in labels:
        color = im[seg == u].mean(axis=0)
        out[seg == u] = color
    return out
def inspect_segs_3D(*args, **kwargs):
"""Show corresponding slices side by side in multiple segmentations."""
    z = kwargs.get('z', 0)
    axis = kwargs.get('axis', -1)
    numplots = 0
    im = kwargs.get('image', None)
    if im is not None:
        numplots += 1
    fignum = kwargs.get('fignum', 1)
    prob = kwargs.get('prob', None)
    if prob is not None:
        numplots += 1
    numplots += len(args)
plot_arrangements = []
for i in range(1,4):
for j in range(i,4):
plot_arrangements.append((i*j, i,j))
# first plot arrangement
plot_arrangement = [(i,j) for p,i,j in plot_arrangements
if p >= numplots][0]
fig = plt.figure(fignum)
current_subplot = 1
if im is not None:
plt.subplot(*plot_arrangement+(current_subplot,))
imshow_grey(im.swapaxes(0,axis)[z])
current_subplot += 1
if prob is not None:
plt.subplot(*plot_arrangement+(current_subplot,))
imshow_jet(prob.swapaxes(0,axis)[z])
current_subplot += 1
for i, j in enumerate(range(current_subplot, numplots+1)):
plt.subplot(*plot_arrangement+(j,))
imshow_rand(args[i].swapaxes(0,axis)[z])
return fig
def plot_vi(a, history, gt, fig=None):
"""Plot the VI from segmentations based on Rag and sequence of merges."""
v = []
n = []
seg = a.get_segmentation()
for i in history:
seg[seg==i[1]] = i[0]
v.append(evaluate.vi(seg, gt))
        n.append(len(np.unique(seg)) - 1)  # number of segments, excluding the background label
if fig is None:
fig = plt.figure()
plt.plot(n, v, figure = fig)
plt.xlabel('Number of segments', figure = fig)
plt.ylabel('vi', figure = fig)
def plot_vi_breakdown_panel(px, h, title, xlab, ylab, hlines, **kwargs):
x = scipy.arange(max(min(px),1e-10), max(px), (max(px)-min(px))/100.0)
for val in hlines:
plt.plot(x, val/x, c='gray', ls=':')
plt.scatter(px, h, label=title, **kwargs)
af = AnnoteFinder(px, h, [str(i) for i in range(len(px))],
xtol=0.005, ytol=0.005, xmin=-0.05*max(px), ymin=-0.05*max(px),
xmax = 1.05*max(px), ymax=1.05*max(h))
plt.connect('button_press_event', af)
plt.xlim(xmin=-0.05*max(px), xmax=1.05*max(px))
plt.ylim(ymin=-0.05*max(h), ymax=1.05*max(h))
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
def plot_vi_breakdown(seg, gt, ignore_seg=[], ignore_gt=[],
hlines=None, subplot=False, **kwargs):
"""Plot conditional entropy H(Y|X) vs P(X) for both seg|gt and gt|seg."""
plt.ion()
pxy,px,py,hxgy,hygx,lpygx,lpxgy = evaluate.vi_tables(seg,gt,
ignore_seg=ignore_seg, ignore_gt=ignore_gt)
cu = -px*lpygx
co = -py*lpxgy
if hlines is None:
hlines = []
elif hlines == True:
hlines = 10
if type(hlines) == int:
minc = min(cu[cu!=0].min(), co[co!=0].min())
maxc = max(cu[cu!=0].max(), co[co!=0].max())
hlines = np.arange(maxc/hlines, maxc, maxc/hlines)
plt.figure()
if subplot: plt.subplot(1,2,1)
plot_vi_breakdown_panel(px, -lpygx,
'False merges', 'p(S=seg)', 'H(G|S=seg)',
hlines, c='blue', **kwargs)
if subplot: plt.subplot(1,2,2)
plot_vi_breakdown_panel(py, -lpxgy,
'False splits', 'p(G=gt)', 'H(S|G=gt)',
hlines, c='orange', **kwargs)
if not subplot:
plt.title('vi contributions by body.')
plt.legend(loc='upper right', scatterpoints=1)
plt.xlabel('Segment size (fraction of volume)', fontsize='large')
plt.ylabel('Conditional entropy (bits)', fontsize='large')
xmax = max(px.max(), py.max())
plt.xlim(-0.05*xmax, 1.05*xmax)
ymax = max(-lpygx.min(), -lpxgy.min())
plt.ylim(-0.05*ymax, 1.05*ymax)
def plot_vi_parts(*args, **kwargs):
kwargs['subplot'] = True
plot_vi_breakdown(*args, **kwargs)
def add_opts_to_plot(ars, colors='k', markers='^', **kwargs):
if type(colors) not in [list, tuple]:
colors = [colors]
if len(colors) < len(ars):
colors = cycle(colors)
if type(markers) not in [list, tuple]:
markers = [markers]
if len(markers) < len(ars):
markers = cycle(markers)
points = []
for ar, c, m in zip(ars, colors, markers):
opt = ar[:,ar.sum(axis=0).argmin()]
points.append(plt.scatter(opt[0], opt[1], c=c, marker=m, **kwargs))
return points
def add_nats_to_plot(ars, tss, stops=0.5, colors='k', markers='o', **kwargs):
if type(colors) not in [list, tuple]: colors = [colors]
if len(colors) < len(ars): colors = cycle(colors)
if type(markers) not in [list, tuple]: markers = [markers]
if len(markers) < len(ars): markers = cycle(markers)
if type(stops) not in [list, tuple]: stops = [stops]
if len(stops) < len(ars): stops = cycle(stops)
points = []
for ar, ts, stop, c, m in zip(ars, tss, stops, colors, markers):
nat = ar[:,np.flatnonzero(ts<stop)[-1]]
points.append(plt.scatter(nat[0], nat[1], c=c, marker=m, **kwargs))
return points
def plot_split_vi(ars, best=None, colors='k', linespecs='-',
addopt=None, addnat=None, **kwargs):
if type(ars) not in [list, tuple]: ars = [ars]
if type(colors) not in [list, tuple]: colors = [colors]
if len(colors) < len(ars): colors = cycle(colors)
if type(linespecs) not in [list, tuple]: linespecs = [linespecs]
if len(linespecs) < len(ars): linespecs = cycle(linespecs)
lines = []
for ar, color, linespec in zip(ars, colors, linespecs):
lines.append(plt.plot(ar[0], ar[1], c=color, ls=linespec, **kwargs))
if best is not None:
lines.append(plt.scatter(
best[0], best[1],
c=kwargs.get('best-color', 'k'), marker=(5,3,0), **kwargs)
)
return lines
def jet_transparent(im, alpha=0.5):
im = cm.jet(im.astype(np.double)/im.max(), alpha=alpha)
im[(im[...,:3]==np.array([0,0,0.5])).all(axis=-1)] = np.array([0,0,0.5,0])
return im
def show_merge(im, bdr, z, ax=0, alpha=0.5, **kwargs):
plt.figure(figsize=kwargs.get('figsize', (3.25,3.25)))
im = im.swapaxes(0,ax)[z]
bdr = bdr.swapaxes(0,ax)[z]
bdr = jet_transparent(bdr, alpha)
imshow_grey(im)
plt.imshow(bdr)
plt.xticks([])
plt.yticks([])
def show_merge_3D(g, n1, n2, **kwargs):
im = kwargs.get('image', None)
alpha = kwargs.get('alpha', 0.5)
fignum = kwargs.get('fignum', 10)
bdr = np.zeros(g.segmentation.shape, np.uint8)
bdri = list(g[n1][n2]['boundary'])
bdr.ravel()[bdri] = 3
bdr.ravel()[list(g.node[n1]['extent'])] = 1
bdr.ravel()[list(g.node[n2]['extent'])] = 2
bdr = morpho.juicy_center(bdr, g.pad_thickness)
x, y, z = np.array(center_of_mass(bdr==3)).round().astype(np.uint32)
fig = plt.figure(fignum)
bdr_cmapped = jet_transparent(bdr, alpha)
plt.subplot(221); imshow_grey(im[:,:,z]); \
plt.imshow(bdr_cmapped[:,:,z], interpolation='nearest')
plt.subplot(222); imshow_grey(im[:,y,:]); \
plt.imshow(bdr_cmapped[:,y,:], interpolation='nearest')
plt.subplot(223); imshow_grey(im[x,:,:]); \
plt.imshow(bdr_cmapped[x,:,:], interpolation='nearest')
plt.subplot(224); _ = plt.hist(g.probabilities_r[bdri][:,0], bins=25)
return bdr, (x,y,z)
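# --- Editor's sketch: a hedged usage example, not part of the original module ---
# Shows the basic display helpers on a synthetic array; it relies only on numpy,
# matplotlib and the functions defined above.
def _demo_display_helpers():
    """Display a random 2D array with imshow_grey and imshow_jet side by side."""
    demo = np.random.rand(64, 64)
    plt.figure()
    plt.subplot(1, 2, 1)
    imshow_grey(demo)
    plt.subplot(1, 2, 2)
    imshow_jet(demo)
    plt.show()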
|
mit
|
fbagirov/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
241
|
1761
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
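# Editor's sketch (added for illustration, not part of the original example):
# with shuffle=False the informative features occupy columns 0, 1 and 2, so the
# top-ranked indices should be a subset of {0, 1, 2}. This is a quick sanity
# check of the claim in the docstring.
top3 = set(indices[:3].tolist())
print("Top 3 ranked features:", sorted(top3))
print("All informative columns recovered:", top3 <= {0, 1, 2})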
|
bsd-3-clause
|
raghavrv/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
347
|
3268
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
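# Editor's sketch of the "desired purity" idea mentioned in the docstring above:
# keep only samples whose decision score exceeds a threshold and report how pure
# the resulting class-B subset is. The threshold of 0.2 is an arbitrary
# illustrative choice, not something prescribed by the original example.
threshold = 0.2
selected = twoclass_output > threshold
if selected.any():
    purity_b = y[selected].mean()  # y == 1 corresponds to class B
    print("Selected %d samples above score %.1f; fraction of class B: %.2f"
          % (selected.sum(), threshold, purity_b))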
|
bsd-3-clause
|
MJuddBooth/pandas
|
pandas/tests/extension/base/__init__.py
|
3
|
2104
|
"""Base test suite for extension arrays.
These tests are intended for third-party libraries to subclass to validate
that their extension arrays and dtypes satisfy the interface. Moving or
renaming the tests should not be done lightly.
Libraries are expected to implement a few pytest fixtures to provide data
for the tests. The fixtures may be located in either
* The same module as your test class.
* A ``conftest.py`` in the same directory as your test class.
The full list of fixtures may be found in the ``conftest.py`` next to this
file.
.. code-block:: python
import pytest
from pandas.tests.extension.base import BaseDtypeTests
@pytest.fixture
def dtype():
return MyDtype()
class TestMyDtype(BaseDtypeTests):
pass
Your class ``TestMyDtype`` will inherit all the tests defined on
``BaseDtypeTests``. pytest's fixture discovery will supply your ``dtype``
wherever the test requires it. You're free to implement additional tests.
All the tests in these modules use ``self.assert_frame_equal`` or
``self.assert_series_equal`` for dataframe or series comparisons. By default,
they use the usual ``pandas.testing.assert_frame_equal`` and
``pandas.testing.assert_series_equal``. You can override the checks used
by defining the staticmethods ``assert_frame_equal`` and
``assert_series_equal`` on your base test class.
"""
from .casting import BaseCastingTests # noqa
from .constructors import BaseConstructorsTests # noqa
from .dtype import BaseDtypeTests # noqa
from .getitem import BaseGetitemTests # noqa
from .groupby import BaseGroupbyTests # noqa
from .interface import BaseInterfaceTests # noqa
from .methods import BaseMethodsTests # noqa
from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil # noqa
from .printing import BasePrintingTests # noqa
from .reduce import BaseNoReduceTests, BaseNumericReduceTests, BaseBooleanReduceTests # noqa
from .missing import BaseMissingTests # noqa
from .reshaping import BaseReshapingTests # noqa
from .setitem import BaseSetitemTests # noqa
from .io import BaseParsingTests # noqa
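# --- Editor's sketch, not part of the pandas test suite ---
# Illustrates the assert-override hook described in the docstring above. The
# class name and the choice of check_dtype=False are hypothetical examples of
# relaxing the comparison for a third-party extension array; the leading
# underscore keeps pytest from collecting it.
import pandas.testing as _tm


class _ExampleOverrideBase(BaseDtypeTests):
    """Example base class that relaxes dtype checking in comparisons."""

    @staticmethod
    def assert_frame_equal(left, right, *args, **kwargs):
        kwargs.setdefault("check_dtype", False)
        _tm.assert_frame_equal(left, right, *args, **kwargs)

    @staticmethod
    def assert_series_equal(left, right, *args, **kwargs):
        kwargs.setdefault("check_dtype", False)
        _tm.assert_series_equal(left, right, *args, **kwargs)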
|
bsd-3-clause
|
phobson/statsmodels
|
examples/python/robust_models_0.py
|
33
|
2992
|
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
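# Editor's sketch (an addition, not part of the original notebook): RLM exposes
# the final IRLS weights on the fitted results, so we can check that the five
# injected outliers were downweighted. Assumes ``resrlm2.weights`` follows the
# observation order of ``y2``.
outlier_idx = [39, 41, 43, 45, 48]
print('RLM weights at the injected outliers: ', resrlm2.weights[outlier_idx])
print('Median RLM weight over all observations: ', np.median(resrlm2.weights))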
|
bsd-3-clause
|
trankmichael/scikit-learn
|
examples/ensemble/plot_voting_probas.py
|
316
|
2824
|
"""
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
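# Editor's sketch (not part of the original example): the soft-voting output for
# the first sample should equal the weighted average of the three individual
# classifiers' probabilities, taken in the estimator order (lr, rf, gnb) with
# weights [1, 1, 5]. This is a small numerical sanity check of that assumption.
manual_avg = np.average([pr[0] for pr in probas[:3]], axis=0, weights=[1, 1, 5])
print("Manual weighted average for sample 1:", manual_avg)
print("VotingClassifier probabilities for sample 1:", probas[3][0])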
|
bsd-3-clause
|
khkaminska/scikit-learn
|
examples/cluster/plot_birch_vs_minibatchkmeans.py
|
333
|
3694
|
"""
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
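# Editor's sketch (added for illustration): the docstring states that Birch
# without the global step reduces the data to roughly 158 subclusters; that
# count can be read directly off the fitted model. The exact number may differ
# slightly between library versions.
n_subclusters = birch_models[0].subcluster_centers_.shape[0]
print("Birch without global clustering produced %d subclusters" % n_subclusters)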
|
bsd-3-clause
|
julienmalard/Tinamit
|
pruebas/test_calib_ecs.py
|
1
|
5118
|
import os
import unittest
import numpy as np
import numpy.testing as npt
import pandas as pd
import scipy.stats as estad
from tinamit.calibs.ec import CalibradorEcOpt, CalibradorEcBayes
from tinamit.datos.bd import BD
from tinamit.datos.fuente import FuenteDic
from tinamit.geog.región import gen_lugares
from tinamit.ejemplos import obt_ejemplo
try:
import pymc3 as pm
import theano as thn
except ImportError:
pm = thn = None
dir_act = os.path.split(__file__)[0]
arch_csv_geog = obt_ejemplo('geog_guate/geog_guate.csv')
arch_mds = os.path.join(dir_act, 'recursos/mds/prueba_para_calib.mdl')
calibradores = {'opt': CalibradorEcOpt, 'bayes': CalibradorEcBayes}
if pm is None or thn.configdefaults.rc != 0:  # skip if pymc3 is unavailable or there is no C compiler
calibradores.pop('bayes')
class TestCalibrador(unittest.TestCase):
@classmethod
def setUpClass(cls):
n_obs = 50
datos_x = np.random.rand(n_obs)
ec = 'y = a*x + b'
cls.paráms = {'a': 2.4, 'b': -5}
cls.clbrds = {ll: v(ec=ec, paráms=list(cls.paráms)) for ll, v in calibradores.items()}
datos_y = cls.paráms['a'] * datos_x + cls.paráms['b'] + np.random.normal(0, 0.1, n_obs)
cls.bd_datos = BD(
fuentes=FuenteDic({'y': datos_y, 'x': datos_x, 'f': np.arange(n_obs)}, 'Datos generados', fechas='f')
)
def test_calibración_sencilla(símismo):
líms = {
'sin_líms': None,
'un_lím': {'a': (0, None), 'b': (None, 0)},
'dos_líms': {'a': (0, 10), 'b': (-10, -1)}
}
for nmbr, clbrd in símismo.clbrds.items():
for lm in líms:
with símismo.subTest(método=nmbr, líms=lm):
calibs = clbrd.calibrar(líms_paráms=líms[lm], bd=símismo.bd_datos)
est = [calibs[p]['cumbre'] for p in símismo.paráms]
val = list(símismo.paráms.values())
npt.assert_allclose(est, val, rtol=0.1)
def test_calibrador_sin_var_y(símismo):
with símismo.assertRaises(ValueError):
CalibradorEcOpt(ec='a*x+b', paráms=list(símismo.paráms))
class TestCalibGeog(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.paráms = prms = {
'a': {'701': 3.4, '708': 3, '1001': 10},
'b': {'701': -1.5, '708': -1.1, '1001': -3}
        }  # Note: a in (0, +inf) and b in (-inf, +inf), following the limits in the external model!
cls.ec = 'y = a*x + b'
n_obs = {'701': 500, '708': 50, '1001': 500}
datos_x = {lg: np.random.rand(n) for lg, n in n_obs.items()}
datos_y = {
lg: datos_x[lg] * prms['a'][lg] + prms['b'][lg] + np.random.normal(0, 0.1, n_obs[lg])
for lg in n_obs
} # y = a*x + b
lugares = [x for ll, v in datos_x.items() for x in [ll] * v.size]
x = [i for v in datos_x.values() for i in v]
y = [i for v in datos_y.values() for i in v]
cls.clbrds = {ll: v(ec=cls.ec, paráms=['a', 'b']) for ll, v in calibradores.items()}
fchs = pd.date_range(0, periods=len(x))
cls.bd = BD(FuenteDic(
{'lugar': lugares, 'x': x, 'y': y, 'f': fchs}, 'Datos geográficos', lugares='lugar', fechas='f'
))
cls.lugar = gen_lugares(arch_csv_geog, nivel_base='País', nombre='Iximulew')
def test_calibración_geog_con_escalas(símismo):
"""
        Calibrate a geography with different scales (municipalities and departments).
"""
líms_paráms = {'a': (0, 50)}
for m in calibradores:
with símismo.subTest(método=m):
clbrd = calibradores[m](símismo.ec, paráms=['a', 'b'])
calibs = clbrd.calibrar(símismo.bd, lugar=símismo.lugar, líms_paráms=líms_paráms)
val = [símismo.paráms[p][lg] for p in símismo.paráms for lg in símismo.paráms[p]]
if m == 'opt':
est = [calibs[lg][p]['cumbre'] for p in símismo.paráms for lg in símismo.paráms[p]]
npt.assert_allclose(val, est, rtol=0.2)
else:
est = [calibs[lg][p]['dist'] for p in símismo.paráms for lg in símismo.paráms[p]]
símismo._verificar_aprox_bayes(val, est)
def test_calibración_bayes_sin_mod_jerárquíco(símismo):
"""
        Calibrate over a geography without a hierarchical model (region by region, with no prior information).
"""
if 'bayes' in calibradores:
calibs = símismo.clbrds['bayes'].calibrar(símismo.bd, lugar=símismo.lugar, jerárquico=False)
val = [símismo.paráms[lg][p] for lg in símismo.paráms for p in símismo.paráms[lg]]
est = [calibs[p][lg]['dist'] for lg in símismo.paráms for p in símismo.paráms[lg]]
símismo._verificar_aprox_bayes(val, est)
@staticmethod
def _verificar_aprox_bayes(val, est, intvl=99):
npt.assert_allclose([estad.percentileofscore(e, v) for e, v in zip(est, val)], 50, atol=intvl / 2)
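# --- Editor's sketch, not part of the Tinamit test suite ---
# Illustrates with plain numpy the relationship the tests above calibrate:
# recovering a and b from noisy y = a*x + b data by least squares. It is only
# a conceptual check; the real calibrators (opt/bayes) are exercised above.
def _linear_recovery_sketch(a=2.4, b=-5.0, n_obs=50):
    x = np.random.rand(n_obs)
    y = a * x + b + np.random.normal(0, 0.1, n_obs)
    a_est, b_est = np.polyfit(x, y, 1)
    return a_est, b_est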
|
gpl-3.0
|
sergej-C/dl_utils
|
caffe_utils.py
|
1
|
13611
|
import sys
import os
import matplotlib.pyplot as plt
import numpy as np ; na = np.newaxis
import caffe
import errno
class caffe_utils():
MU = None
g_transformer = None
CAFFE_PATH=''
EXAMPLE_PATH=''
model_def = ''
    model_weights = ''
CAFFE_TEST_MODELS_PATH=''
def __init__(self, caffe_path=None, caffe_test_model_path=None, caffe_test_model_name='caffe_test_models', debug=False, mode=1):
"""
        If caffe_test_model_path is not set, the current working dir is used
        and a folder named caffe_test_model_name is created inside it.
        If caffe_path is None, the CAFFE_PATH environment variable is used.
        GPU mode is set by default.
"""
sys.path.append(self.CAFFE_PATH+'/python')
if caffe_path==None:
self.CAFFE_PATH=os.environ.get('CAFFE_PATH')
            if not self.CAFFE_PATH:
                print 'error: caffe path not set!!'
else:
self.CAFFE_PATH=caffe_path
self.EXAMPLE_PATH=self.CAFFE_PATH+'/examples'
sys.path.append(self.CAFFE_PATH+'/python')
self.model_def = self.CAFFE_PATH + '/models/bvlc_reference_caffenet/deploy.prototxt'
self.model_weights = self.CAFFE_PATH + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
self.DEBUG_MODE=debug
if caffe_test_model_path==None:
self.CAFFE_TEST_MODELS_PATH = os.getcwd() + '/'+caffe_test_model_name
if not os.path.exists(self.CAFFE_TEST_MODELS_PATH):
try:
os.mkdir(self.CAFFE_TEST_MODELS_PATH)
print "created caffe test models folder as {}".format(self.CAFFE_TEST_MODELS_PATH)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
else:
print "caffe test models folder exists: {}".format(self.CAFFE_TEST_MODELS_PATH)
self.change_mode(mode)
def change_mode(self, mode=1):
if mode==0:
caffe.set_mode_cpu()
self.mode='cpu'
elif mode==1:
caffe.set_mode_gpu()
self.mode='gpu'
else:
print "error mode can be either 1=gpu or 0=cpu, setting cpu mode as default"
self.mode = 'cpu'
caffe.set_mode_cpu()
def get_caffe_path(self):
return self.CAFFE_PATH
def get_caffe_test_models_path(self):
return self.CAFFE_TEST_MODELS_PATH
def get_images_mean(self):
if self.MU is None:
# load the mean ImageNet image (as distributed with Caffe) for subtraction
mu = np.load(self.CAFFE_PATH + '/python/caffe/imagenet/ilsvrc_2012_mean.npy')
mu = mu.mean(1).mean(1) # average over pixels to obtain the mean (BGR) pixel values
print 'mean-subtracted values:', zip('BGR', mu)
self.MU = mu
return mu
else:
return self.MU
def get_transformer(self, net, mu=None, layer_name='data', swap_channels=True):
if mu==None:
mu=self.get_images_mean()
if self.g_transformer is None:
# create transformer for the input called 'data'
transformer = caffe.io.Transformer({'data': net.blobs[layer_name].data.shape})
transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension
transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
if swap_channels:
transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR
self.g_transformer = transformer
return transformer
else:
return self.g_transformer
def reshape_input_in_net(self, net, batch_size=1, channels=3, width=227, height=227, layer_name='data'):
# set the size of the input (we can skip this if we're happy
# with the default; we can also change it later, e.g., for different batch sizes)
net.blobs[layer_name].reshape(batch_size, # batch size
channels, # 3-channel (BGR) images
                                      height, width) # image size, e.g. 227x227
def preprocess_image_in_net(
self,
net,
img_path=None,
show=False,
batch_size=1, channels=3, width=227, height=227
):
"""
        Load an image and apply the transformations needed to feed it into the net's data layer.
"""
if img_path==None:
img_path = self.CAFFE_PATH + '/examples/images/cat.jpg'
transformer = self.get_transformer(net)
self.reshape_input_in_net(net, batch_size=batch_size, channels=channels, width=width, height=height)
image = caffe.io.load_image(img_path)
transformed_image = transformer.preprocess('data', image)
if show:
plt.imshow(image)
return transformed_image
def forward_image_to_net(
self,
net,
data_layer_name='data',
img_path=None,
show=False,
batch_size=1, channels=3, width=227, height=227
):
if img_path==None:
img_path = self.CAFFE_PATH + '/examples/images/cat.jpg'
prep_img = self.preprocess_image_in_net(
net,
show=show,
img_path=img_path,
batch_size=batch_size,
channels=channels,
width=width,
height=height)
# copy the image data into the memory allocated for the net
net.blobs[data_layer_name].data[...] = prep_img
### perform classification
output = net.forward()
return output
def deprocess_img(self, transformer_out, show=False):
deproc_img = self.g_transformer.deprocess('data', transformer_out)
if show:
plt.imshow(deproc_img)
return deproc_img
def show_filters(self, net, use_custom_min_max=True):
# forward the data
net.forward()
# create a plot
plt.figure()
# set min and max value of imshow based on data min and max
filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()
print net.blobs['conv'].data.shape
for i in range(3):
plt.subplot(1,4,i+2)
plt.title("filter #{} output".format(i))
plt.imshow(net.blobs['conv'].data[0, i], vmin=filt_min, vmax=filt_max)
# adjust spaces
plt.tight_layout()
# no axis
plt.axis('off')
# view color range
#cbar = plt.colorbar()
def vis_square(self, data):
"""Take an array of shape (n, height, width) or (n, height, width, 3)
and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""
# normalize data for display
data = (data - data.min()) / (data.max() - data.min())
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = (((0, n ** 2 - data.shape[0]),
(0, 1), (0, 1)) # add some space between filters
+ ((0, 0),) * (data.ndim - 3)) # don't pad the last dimension (if there is one)
data = np.pad(data, padding, mode='constant', constant_values=1) # pad with ones (white)
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imshow(data); plt.axis('off')
def get_caffe_net(self, mode=caffe.TEST):
caffe_net = self.create_net(
self.model_def, # defines the structure of the model
self.model_weights, # contains the trained weights
mode=mode) # use test mode (e.g., don't perform dropout)
return caffe_net
def create_net(self, proto_filename, path=None, weights_file='', mode=caffe.TEST):
"""
        Create a net from the specified proto file, looked up inside `path`
        (default: caffe_test_model_path) as `proto_filename`.prototxt.
        Default mode is caffe.TEST.
"""
if path==None:
path=self.CAFFE_TEST_MODELS_PATH
mpath = path + '/' + proto_filename + '.prototxt'
if self.DEBUG_MODE:
print "opening model in {}, weights {}, mode {}".format(
mpath,
weights_file,
mode
)
if not os.path.exists(mpath):
print "error, file not exists! {}".format(mpath)
return
if weights_file != '':
if not os.path.exists(weights_file):
print "error, file not exists! {}".format(weights_file)
return
if weights_file!='':
return caffe.Net(mpath, weights_file, mode)
else:
return caffe.Net(mpath, mode)
def get_imagenet_labels(self):
labels = []
labels_file = self.CAFFE_PATH + '/data/ilsvrc12/synset_words.txt'
if not os.path.exists(labels_file):
print("ATTENTION!!! file synset_words.txt in caffe data path doesn't exist! download it, no imagenet labels...")
return False
else:
labels = np.loadtxt(labels_file, str, delimiter='\t')
return labels
def get_predicted_class(self, forward_out, batch_index=0, n_top=5):
"""
        Get the argmax of the probabilities returned by a forward pass of a net
        (tested on the caffenet classifier; todo: test other models) and the
        corresponding ImageNet labels, for the batch sample at index batch_index
        (default: the first). Returns (index_max, best_label, top_n_cats); the
        label is empty if the ImageNet synset file is missing from the caffe data path.
"""
output_prob = forward_out['prob'][batch_index]
labels = self.get_imagenet_labels()
best_labels = ''
index_max = output_prob.argmax()
if labels!=False:
best_labels=labels[index_max]
top_inds = output_prob.argsort()[::-1][:n_top] # reverse sort and take five largest items
top_n_cats = zip(output_prob[top_inds], labels[top_inds])
return index_max, best_labels, top_n_cats
def classify_img(self, caffe_net=None, img_path=None, show=False):
if img_path==None:
img_path = self.CAFFE_PATH + '/examples/images/cat.jpg'
if caffe_net==None:
caffe_net=self.get_caffe_net()
output = self.forward_image_to_net(caffe_net, img_path=img_path, show=show)
return self.get_predicted_class(output)
def show_layers(self, net):
# for each layer, show the output shape
for layer_name, blob in net.blobs.iteritems():
print layer_name + '\t' + str(blob.data.shape)
def show_params(self, net):
for layer_name, param in net.params.iteritems():
print layer_name + '\t' + str(param[0].data.shape), str(param[1].data.shape)
def describe_net(self, net):
print("===================LAYERS=======================================")
self.show_layers(net)
print("-------------------PARAMS---------------------------------------")
self.show_params(net)
print("================================================================")
def vis_filters(self, net, layer_name='conv1', filter_index=0):
filters = net.params[layer_name][filter_index].data
self.vis_square(filters.transpose(0, 2, 3, 1)) #todo - valid for different filter size?
def vis_net_filters(self, net, layer_name):
for fidx in range(len(net.params[layer_name])):
self.vis_filters(net, layer_name, fidx)
def save_spec_to_file(self, net_spec, model_name, path=None, overwrite=False):
if path==None:
path=self.CAFFE_TEST_MODELS_PATH
fpath = path+'/'+model_name+'.prototxt'
if os.path.exists(fpath) and overwrite==False:
print "file already exists, specify overwrite=True for substitution"
return
with open(fpath, 'w') as f:
f.write(str(net_spec.to_proto()))
def print_net_params(self, net, lay_name):
if net.params.has_key(lay_name):
print('[== PARAMS ==]')
print('Wheights ====>')
prm = net.params[lay_name][0].data
print(prm.shape)
print(prm)
print('Bias ====>')
b = net.params[lay_name][1].data
print(b.shape)
print(b)
def print_net_blob_data(self, net, lay_name):
print('[== BLOB DATA ==]')
if net.blobs.has_key(lay_name):
bd = net.blobs[lay_name].data
print(bd.shape)
print bd
def print_net_data(self, net):
for ln,bl in net.blobs.iteritems():
print("[-------------- {} -------------]").format(ln)
self.print_net_blob_data(net, ln)
self.print_net_params(net, ln)
print("[--------------END {} -------------]\n\n").format(ln)
|
mit
|
fboers/jumegX
|
mft/jumeg_mft_plot.py
|
1
|
9768
|
"""
====================
Jumeg MFT Plotting.
====================
"""
import numpy as np
import matplotlib.pyplot as plt
# from nilearn.plotting import plot_stat_map
# from nilearn.image import index_img
from mne import SourceEstimate, VolSourceEstimate
from mne.transforms import invert_transform, apply_trans
def plot_global_cdv_dist(stcdata):
'''
Plot global cdv-distribution at time of max |cdv|
'''
print "##### Plot global cdv-distribution at time of max |cdv|:"
time_idx = np.argmax(np.max(stcdata, axis=0))
fig = plt.figure()
plt.xlim((0, stcdata.shape[0]+100))
plt.ylim((-0.1*np.max(stcdata[:, time_idx]),
1.1*np.max(stcdata[:, time_idx])))
cdvnmax = stcdata[:, time_idx]
print "cdvnmax.shape=", cdvnmax.shape
plt.plot(xrange(cdvnmax.shape[0]), cdvnmax)
plt.xlabel('n')
    plt.ylabel('|cdv(t_i=%d)|' % time_idx)
plt.savefig('testfig_cdvgtfixed.png')
plt.close()
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep,
subject, subjects_dir):
'''
Plot the MFT sources at time point of peak.
'''
print "##### Attempting to plot:"
# cf. decoding/plot_decoding_spatio_temporal_source.py
vertices = [s['vertno'] for s in fwdmag['src']]
if len(vertices) == 1:
vertices = [fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]]
stc_feat = SourceEstimate(stcdata, vertices=vertices,
tmin=-0.2, tstep=tstep, subject=subject)
for hemi in ['lh', 'rh']:
brain = stc_feat.plot(surface='white', hemi=hemi, subjects_dir=subjects_dir,
transparent=True, clim='auto')
brain.show_view('lateral')
# use peak getter to move visualization to the time point of the peak
tmin = 0.095
tmax = 0.10
print "Restricting peak search to [%fs, %fs]" % (tmin, tmax)
if hemi == 'both':
vertno_max, time_idx = stc_feat.get_peak(hemi='rh', time_as_index=True,
tmin=tmin, tmax=tmax)
else:
vertno_max, time_idx = stc_feat.get_peak(hemi=hemi, time_as_index=True,
tmin=tmin, tmax=tmax)
if hemi == 'lh':
comax = fwdmag['src'][0]['rr'][vertno_max]
print "hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][0]['rr'][vertno_max] = " %\
(hemi, vertno_max, time_idx), comax
elif len(fwdmag['src']) > 1:
comax = fwdmag['src'][1]['rr'][vertno_max]
print "hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][1]['rr'][vertno_max] = " %\
(hemi, vertno_max, time_idx), comax
print "hemi=%s: setting time_idx=%d" % (hemi, time_idx)
brain.set_data_time_index(time_idx)
# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi=hemi, color='blue',
scale_factor=0.6)
offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
if hemi == 'lh':
ifoci = [np.nonzero([stcdata[0:offsets[1],time_idx]>=0.25*np.max(stcdata[:,time_idx])][0])]
vfoci = fwdmag['src'][0]['vertno'][ifoci[0][0]]
cfoci = fwdmag['src'][0]['rr'][vfoci]
print "Coords of %d sel. vfoci: " % cfoci.shape[0]
print cfoci
print "vfoci: "
print vfoci
print "brain.geo['lh'].coords[vfoci] : "
print brain.geo['lh'].coords[vfoci]
elif len(fwdmag['src']) > 1:
ifoci = [np.nonzero([stcdata[offsets[1]:,time_idx]>=0.25*np.max(stcdata[:,time_idx])][0])]
vfoci = fwdmag['src'][1]['vertno'][ifoci[0][0]]
cfoci = fwdmag['src'][1]['rr'][vfoci]
print "Coords of %d sel. vfoci: " % cfoci.shape[0]
print cfoci
print "vfoci: "
print vfoci
print "brain.geo['rh'].coords[vfoci] : "
print brain.geo['rh'].coords[vfoci]
mrfoci = np.zeros(cfoci.shape)
invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
mrfoci = apply_trans(invmri_head_t['trans'],cfoci, move=True)
print "mrfoci: "
print mrfoci
        # Just some blobs:
bloblist = np.zeros((300,3))
for i in xrange(100):
bloblist[i,0] = float(i)
bloblist[i+100,1] = float(i)
bloblist[i+200,2] = float(i)
mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
brain.save_image('testfig_map_%s.png' % hemi)
brain.close()
def plot_cdv_distribution(fwdmag, stcdata):
'''
Plot cdv-distribution.
'''
print "##### Plot cdv-distribution:"
maxxpnt = np.max([len(s['vertno']) for s in fwdmag['src']])
iblck = -1
time_idx = np.argmax(np.max(stcdata, axis=0))
fig = plt.figure()
plt.xlim((0, maxxpnt + 100))
plt.ylim((-0.1 * np.max(stcdata[:, time_idx]),
1.1 * np.max(stcdata[:, time_idx])))
offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
print "offsets = ",offsets
for s in fwdmag['src']:
iblck = iblck + 1
cdvnmax = stcdata[offsets[iblck]:offsets[iblck]+offsets[iblck+1], time_idx]
print "cdvnmax.shape=", cdvnmax.shape
plt.plot(xrange(cdvnmax.shape[0]), np.sort(cdvnmax))
plt.xlabel('n')
    plt.ylabel('|cdv(t_i=%d)|' % time_idx)
plt.savefig('testfig_cdvtfixed.png')
plt.close()
def plot_max_amplitude_data(fwdmag, stcdata, tmin, tstep, subject, method='mft'):
print "##### Attempting to plot max. amplitude data:"
fig = plt.figure()
iblck = -1
offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
for s in fwdmag['src']:
iblck = iblck + 1
stc = VolSourceEstimate(stcdata[offsets[iblck]:offsets[iblck]+offsets[iblck+1],:], vertices=s['vertno'],
tmin=tmin, tstep=tstep, subject=subject)
# View activation time-series
plt.xlim((1e3*np.min(stc.times), 1e3*np.max(stc.times)))
plt.ylim((0, np.max(stcdata)))
plt.plot(1e3 * stc.times, np.max(stc.data, axis=0),
label=(('lh', 'rh'))[iblck])
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.savefig('testfig'+"{0:0=2d}".format(iblck)+'.png')
plt.close()
def plot_max_cdv_data(stc_mft, lhmrinds, rhmrinds):
''' Plot max CDV data.
'''
print "##### Attempting to plot max. cdv data:"
fig = plt.figure()
stcdata = stc_mft.data
plt.plot(1e3 * stc_mft.times, np.max(stcdata[lhmrinds[0],:], axis=0), label='lh')
plt.plot(1e3 * stc_mft.times, np.max(stcdata[rhmrinds[0],:], axis=0), label='rh')
plt.plot(1e3 * stc_mft.times, np.max(stcdata, axis=0), label='all')
plt.xlabel('time (ms)')
plt.ylabel('max(|cdv|) value')
plt.legend()
plt.savefig('testfig_cdvmax')
plt.close()
def plot_cdvsum_data(stc_mft, lhmrinds, rhmrinds):
'''Plot cdvsum data.
'''
print "##### Attempting to cdvsum data:"
fig = plt.figure()
stcdata = stc_mft.data
plt.plot(1e3 * stc_mft.times, np.sum(stcdata[lhmrinds[0],:],axis=0),label='lh')
plt.plot(1e3 * stc_mft.times, np.sum(stcdata[rhmrinds[0],:],axis=0),label='rh')
plt.plot(1e3 * stc_mft.times, np.sum(stcdata,axis=0),label='all')
plt.xlabel('time (ms)')
plt.ylabel('sum(|cdv|) value')
plt.legend()
plt.savefig('testfig_cdvsum')
plt.close()
def plot_quality_data(qualmft, stc_mft):
'''Plot quality data.
'''
print "##### Attempting to plot quality data:"
fig = plt.figure()
# relerrscal = pow(10,-int(np.log10(np.max(qualmft['relerr'][:]))))
scalexp = -int(np.log10(np.max(qualmft['relerr'][:])))-1
relerrscal = pow(10,scalexp)
ls = '-'
#if fwdname.rfind('vol')>0: ls = '--'
plt.ylim((0,1.05))
plt.plot(1e3 * stc_mft.times, relerrscal*qualmft['relerr'][:],'r'+ls, label='relerr')
plt.plot(1e3 * stc_mft.times, qualmft['rdmerr'][:], 'g'+ls, label='rdmerr')
plt.plot(1e3 * stc_mft.times, qualmft['mag'][:], 'b'+ls, label='mag')
plt.xlabel('time (ms)')
plt.ylabel('r: 10^%d*relerr, g: rdmerr, b: mag' % scalexp)
plt.legend(loc='center right')
plt.savefig('testfig_qual')
plt.close()
# TODO cdmdata computation to be added into apply_mft
def plot_cdm_data(stc_mft, cdmdata):
'''Plot CDM data.
'''
print "##### Attempting to plot cdm data:"
fig = plt.figure()
plt.ylim((0,1.05))
plt.plot(1e3 * stc_mft.times, cdmdata[0, :], 'r', label='lh')
plt.plot(1e3 * stc_mft.times, cdmdata[1, :], 'g', label='rh')
plt.plot(1e3 * stc_mft.times, cdmdata[2, :], 'b', label='all')
# plt.plot(1e3 * stc_mft.times, cdmdata[3,:],'m',label='lh,fit')
# plt.plot(1e3 * stc_mft.times, cdmdata[4,:],'c',label='rh,fit')
# plt.plot(1e3 * stc_mft.times, cdmdata[5,:],'k',label='all,fit')
plt.xlabel('time (ms)')
plt.ylabel('cdm value')
plt.legend()
plt.savefig('testfig_cdm')
plt.close()
# TODO jlngdata computation to be added into apply_mft
def plot_jlong_data(stc_mft, jlngdata):
print "##### Attempting to plot jlong data:"
fig = plt.figure()
# plt.ylim((0,1.05))
plt.plot(1e3 * stc_mft.times, jlngdata[0, :], 'r', label='lh')
plt.plot(1e3 * stc_mft.times, jlngdata[1, :], 'g', label='rh')
plt.plot(1e3 * stc_mft.times, jlngdata[2, :], 'b', label='all')
plt.xlabel('time (ms)')
plt.ylabel('j_long value')
plt.legend()
plt.savefig('testfig_jlong')
plt.close()
|
bsd-3-clause
|
MTgeophysics/mtpy
|
mtpy/imaging/plotstrike2d.py
|
1
|
51163
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 18:28:24 2013
@author: jpeacock-pr
"""
#==============================================================================
import matplotlib.pyplot as plt
import numpy as np
import os
from matplotlib.ticker import MultipleLocator
import mtpy.imaging.mtplottools as mtpl
import mtpy.analysis.geometry as MTgy
#==============================================================================
class PlotStrike2D(object):
"""
    PlotStrike will plot the strike estimated from the invariants, phase tensor
    and the tipper in either a rose diagram or an xy plot
plots the strike angle as determined by phase tensor azimuth (Caldwell et
al. [2004]) and invariants of the impedance tensor (Weaver et al. [2003]).
The data is split into decades where the histogram for each is plotted in
    the form of a rose diagram with a range of 0 to 180 degrees, where 0 is
    North and 90 is East. The median angle of the period band is shown in the
    polar diagram. The top row is the strike estimated from
the invariants of the impedance tensor. The bottom row is the azimuth
estimated from the phase tensor. If tipper is 'y' then the 3rd row is the
strike determined from the tipper, which is orthogonal to the induction
arrow direction.
Arguments:
----------
**fn_list** : list of strings
full paths to .edi files to plot
**z_object** : class mtpy.core.z.Z
object of mtpy.core.z. If this is input be sure the
attribute z.freq is filled. *default* is None
**mt_object** : class mtpy.imaging.mtplot.MTplot
object of mtpy.imaging.mtplot.MTplot
*default* is None
**fignum** : int
figure number to be plotted. *Default* is 1
**fs** : float
font size for labels of plotting. *Default* is 10
**dpi** : int
dots-per-inch resolution of figure, 300 is needed for
publications. *Default* is 300
**thetar** : float
angle of rotation clockwise positive. *Default* is 0
**ptol** : float
Tolerance level to match periods from different edi files.
*Default* is 0.05
**text_dict** : dictionary
*'pad' : float
padding of the angle label at the bottom of each
polar diagram. *Default* is 1.65
*'size' : float
font size
**plot_range** : [ 'data' | (period_min,period_max) ]
period range to estimate the strike angle. Options are:
* *'data'* for estimating the strike for all periods
in the data.
* (pmin,pmax) for period min and period max, input as
(log10(pmin),log10(pmax))
**plot_type** : [ 1 | 2 ]
-*1* to plot individual decades in one plot
-*2* to plot all period ranges into one polar diagram
for each strike angle estimation
**plot_tipper** : [ 'y' | 'n' ]
-*'y'* to plot the tipper strike
-*'n'* to not plot tipper strike
**pt_error_floor** : float
Maximum error in degrees that is allowed to estimate strike.
*Default* is None allowing all estimates to be used.
**fold** : [ True | False ]
*True to plot only from 0 to 180
*False to plot from 0 to 360
:Example: ::
>>> import os
>>> import mtpy.imaging.mtplot as mtplot
>>> edipath = r"/home/EDIFiles"
>>> edilist = [os.path.join(edipath,edi) for edi in os.listdir(edipath)
>>> ... if edi.find('.edi')>0]
>>> #---plot rose plots in decades with tipper and an error floor on pt
>>> strike = mtplot.plot_strike(fn_list=edilist, plot_type=1,\
pt_error_floor=5)
>>> #---plot all decades into one rose plot for each estimation---
>>> strike.plot_type = 2
>>> strike.redraw_plot()
>>> #---save the plot---
>>> strike.save_plot(r"/home/Figures")
'Figure saved to /home/Figures/StrikeAnalysis_.pdf'
Attributes:
-----------
-axhinv matplotlib.axes instance for invariant strike
-axhpt matplotlib.axes instance for phase tensor strike
-axhtip matplotlib.axes instance for tipper strike
-barinv matplotlib.axes.bar instance for invariant strike
-barpt matplotlib.axes.bar instance for pt strike
-bartr matplotlib.axes.bar instance for tipper strike
-bin_width width of histogram bins in degrees
-fig matplotlib.figure instance of plot
-fig_dpi dots-per-inch resolution of figure
-fig_num number of figure being plotted
-fig_size size of figure in inches
-fold boolean to fold angles to range from [0,180] or
[0,360]
-font_size font size of axes tick labels
-mt_list list of mtplot.MTplot instances containing all
the important information for each station
-period_tolerance tolerance to look for periods being plotted
-plot_range range of periods to plot
-plot_tipper string to tell program to plot induction arrows
-plot_type string to tell program how to plot strike angles
-plot_yn plot strike on instance creation
-pt_error_floor error floor to plot phase tensor strike, anything
above this error will not be plotted
-text_pad padding between text and rose diagram
-text_size font size of text labeling the mode of the histogram
-title_dict title dictionary
Methods:
--------
-plot plots the pseudo section
-redraw_plot on call redraws the plot from scratch
-save_figure saves figure to a file of given format
-update_plot updates the plot while still active
-writeTextFiles writes parameters of the phase tensor and tipper
to text files.
"""
def __init__(self, **kwargs):
fn_list = kwargs.pop('fn_list', None)
z_object_list = kwargs.pop('z_object_list', None)
tipper_object_list = kwargs.pop('tipper_object_list', None)
mt_object_list = kwargs.pop('mt_object_list', None)
#------Set attributes of the class-----------------
#--> get the inputs into a list of mt objects
self.mt_list = mtpl.get_mtlist(fn_list=fn_list,
z_object_list=z_object_list,
tipper_object_list=tipper_object_list,
mt_object_list=mt_object_list)
self._rot_z = kwargs.pop('rot_z', 0)
if isinstance(self._rot_z, float) or isinstance(self._rot_z, int):
self._rot_z = np.array([self._rot_z] * len(self.mt_list))
# if the rotation angle is an array for rotation of different
# freq than repeat that rotation array to the len(mt_list)
elif isinstance(self._rot_z, np.ndarray):
if self._rot_z.shape[0] != len(self.mt_list):
self._rot_z = np.repeat(self._rot_z, len(self.mt_list))
else:
pass
#--> set plot properties
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_dpi = kwargs.pop('fig_dpi', 300)
self.fig_size = kwargs.pop('fig_size', [7, 5])
self.plot_num = kwargs.pop('plot_num', 1)
self.plot_type = kwargs.pop('plot_type', 2)
self.plot_title = kwargs.pop('plot_title', None)
self.plot_range = kwargs.pop('plot_range', 'data')
self.plot_tipper = kwargs.pop('plot_tipper', 'n')
self.period_tolerance = kwargs.pop('period_tolerance', .05)
self.pt_error_floor = kwargs.pop('pt_error_floor', None)
self.fold = kwargs.pop('fold', True)
self.bin_width = kwargs.pop('bin_width', 5)
self.skew_threshold = kwargs.pop('skew_threshold', 3)
self.font_size = kwargs.pop('font_size', 7)
text_dict = kwargs.pop('text_dict', {})
try:
self.text_pad = text_dict['pad']
except KeyError:
self.text_pad = 0.6
try:
self.text_size = text_dict['size']
except KeyError:
self.text_size = self.font_size
# make a dictionary for plotting titles
self.title_dict = {}
self.title_dict[-5] = '10$^{-5}$--10$^{-4}$s'
self.title_dict[-4] = '10$^{-4}$--10$^{-3}$s'
self.title_dict[-3] = '10$^{-3}$--10$^{-2}$s'
self.title_dict[-2] = '10$^{-2}$--10$^{-1}$s'
self.title_dict[-1] = '10$^{-1}$--10$^{0}$s'
self.title_dict[0] = '10$^{0}$--10$^{1}$s'
self.title_dict[1] = '10$^{1}$--10$^{2}$s'
self.title_dict[2] = '10$^{2}$--10$^{3}$s'
self.title_dict[3] = '10$^{3}$--10$^{4}$s'
self.title_dict[4] = '10$^{4}$--10$^{5}$s'
self.title_dict[5] = '10$^{5}$--10$^{6}$s'
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
#---need to rotate data on setting rotz
def _set_rot_z(self, rot_z):
"""
need to rotate data when setting z
"""
# if rotation angle is an int or float make an array the length of
# mt_list for plotting purposes
if isinstance(rot_z, float) or isinstance(rot_z, int):
self._rot_z = np.array([rot_z] * len(self.mt_list))
# if the rotation angle is an array for rotation of different
# freq than repeat that rotation array to the len(mt_list)
elif isinstance(rot_z, np.ndarray):
if rot_z.shape[0] != len(self.mt_list):
self._rot_z = np.repeat(rot_z, len(self.mt_list))
else:
pass
for ii, mt in enumerate(self.mt_list):
mt.rotation_angle = self._rot_z[ii]
def _get_rot_z(self):
return self._rot_z
rot_z = property(fget=_get_rot_z, fset=_set_rot_z,
doc="""rotation angle(s)""")
def plot(self):
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = .07
plt.rcParams['figure.subplot.right'] = .98
plt.rcParams['figure.subplot.bottom'] = .09
plt.rcParams['figure.subplot.top'] = .90
plt.rcParams['figure.subplot.wspace'] = .2
plt.rcParams['figure.subplot.hspace'] = .4
bw = self.bin_width
histrange = (0, 360)
# set empty lists that will hold dictionaries with keys as the period
ptlist = []
tiprlist = []
# initialize some parameters
nc = len(self.mt_list)
nt = 0
kk = 0
for dd, mt in enumerate(self.mt_list):
#--> set the period
period = mt.period
# get maximum length of periods
if len(period) > nt:
nt = len(period)
# estimate where only the 2D sections are
dim_2d = MTgy.dimensionality(z_object=mt._Z,
skew_threshold=self.skew_threshold)
index_2d = np.where(dim_2d == 2)[0]
#------------get strike from phase tensor strike angle-------------
pt = mt.pt
az = (90 - pt.azimuth[index_2d]) % 360
az_err = pt.azimuth_err[index_2d]
# need to add 90 because pt assumes 0 is north and
# negative because measures clockwise.
# put an error max on the estimation of strike angle
if self.pt_error_floor:
az[np.where(az_err > self.pt_error_floor)] = 0.0
# make a dictionary of strikes with keys as period
mdictpt = dict([(ff, jj)
for ff, jj in zip(mt.period[index_2d], az)])
ptlist.append(mdictpt)
#-----------get tipper strike------------------------------------
tip = mt.Tipper
if tip.tipper is None:
tip.tipper = np.zeros((len(mt.period), 1, 2),
dtype='complex')
tip.compute_components()
# needs to be negative because measures clockwise
tipr = -tip.angle_real[index_2d]
tipr[np.where(tipr == 180.)] = 0.0
tipr[np.where(tipr == -180.)] = 0.0
# make sure the angle is between 0 and 360
tipr = tipr % 360
# make a dictionary of strikes with keys as period
tiprdict = dict([(ff, jj)
for ff, jj in zip(mt.period[index_2d], tipr)])
tiprlist.append(tiprdict)
#--> get min and max period
maxper = np.max([np.max(list(mm.keys())) for mm in ptlist if list(mm.keys())])
minper = np.min([np.min(list(mm.keys())) for mm in ptlist if list(mm.keys())])
# make empty arrays to put data into for easy manipulation
medpt = np.zeros((nt, nc))
medtipr = np.zeros((nt, nc))
# make a list of periods from the longest period list
plist = np.logspace(
np.log10(minper),
np.log10(maxper),
num=nt,
base=10)
pdict = dict([(ii, jj) for jj, ii in enumerate(plist)])
self._plist = plist
# put data into arrays
for ii, mm in enumerate(ptlist):
mperiod = list(mm.keys())
for jj, mp in enumerate(mperiod):
for kk in list(pdict.keys()):
if mp > kk * (1 - self.period_tolerance) and \
mp < kk * (1 + self.period_tolerance):
ll = pdict[kk]
medpt[ll, ii] = ptlist[ii][mp]
medtipr[ll, ii] = tiprlist[ii][mp]
else:
pass
# make the arrays local variables
self._medpt = medpt
self._medtp = medtipr
#-----Plot Histograms of the strike angles-----------------------------
if self.plot_range == 'data':
brange = np.arange(np.floor(np.log10(minper)),
np.ceil(np.log10(maxper)), 1)
else:
brange = np.arange(np.floor(self.plot_range[0]),
np.ceil(self.plot_range[1]), 1)
self._brange = brange
# font dictionary
fd = {'size': self.font_size, 'weight': 'normal'}
        #------------------plot individual decades------------------------------
if self.plot_type == 1:
# plot specs
plt.rcParams['figure.subplot.hspace'] = .3
plt.rcParams['figure.subplot.wspace'] = .3
self.fig = plt.figure(self.fig_num, dpi=self.fig_dpi)
plt.clf()
nb = len(brange)
for jj, bb in enumerate(brange, 1):
# make subplots for invariants and phase tensor azimuths
if self.plot_tipper == 'n':
self.axhpt = self.fig.add_subplot(1, nb, jj, polar=True)
axlist = [self.axhpt]
if self.plot_tipper == 'y':
self.axhpt = self.fig.add_subplot(2, nb, jj, polar=True)
self.axhtip = self.fig.add_subplot(2, nb, jj + nb,
polar=True)
axlist = [self.axhpt, self.axhtip]
                # make a list of indices for each decade
binlist = []
for ii, ff in enumerate(plist):
if ff > 10**bb and ff < 10**(bb + 1):
binlist.append(ii)
# extract just the subset for each decade
gg = medpt[binlist, :]
if self.plot_tipper == 'y':
tr = medtipr[binlist, :]
                    # compute the histogram for the tipper strike
trhist = np.histogram(tr[np.nonzero(tr)].flatten(),
bins=int(360/bw),
range=histrange)
# make a bar graph with each bar being width of bw degrees
bartr = self.axhtip.bar((trhist[1][:-1]) * np.pi / 180,
trhist[0],
width=bw * np.pi / 180)
# set color of the bars according to the number in that bin
# tipper goes from dark blue (low) to light blue (high)
for cc, bar in enumerate(bartr):
try:
fc = float(trhist[0][cc]) / trhist[0].max() * .9
except ZeroDivisionError:
fc = 1.0
bar.set_facecolor((0, 1 - fc / 2, fc))
# estimate the histogram for the decade for invariants and pt
pthist = np.histogram(gg[np.nonzero(gg)].flatten(),
bins=int(360/bw),
range=histrange)
# plot the histograms
self.barpt = self.axhpt.bar((pthist[1][:-1]) * np.pi / 180,
pthist[0],
width=bw * np.pi / 180)
# set the color of the bars according to the number in that bin
# pt goes from green (low) to orange (high)
for cc, bar in enumerate(self.barpt):
try:
fc = float(pthist[0][cc]) / pthist[0].max() * .8
except ZeroDivisionError:
fc = 1.0
bar.set_facecolor((fc, 1 - fc, 0))
# make axis look correct with N to the top at 90.
for aa, axh in enumerate(axlist):
# set multiple locator to be every 15 degrees
axh.xaxis.set_major_locator(
MultipleLocator(30 * np.pi / 180))
# set labels on the correct axis
axh.xaxis.set_ticklabels(['', 'E', '', '',
'N', '', '',
'W', '', '',
'S', '', ''])
# make a light grid
axh.grid(alpha=.25)
# set pt axes properties
if aa == 0:
                        # x-limits span the full circle (0 to 2*pi in radians)
axh.set_xlim(0, 2 * np.pi)
# label plot with the mode of the strike angle
ptmode = (90 - pthist[1][np.where(
pthist[0] == pthist[0].max())[0][0]]) % 360
ptmedian = (90 - np.median(gg[np.nonzero(gg)])) % 360
ptmean = (90 - np.mean(gg[np.nonzero(gg)])) % 360
axh.text(np.pi, axh.get_ylim()[1] * self.text_pad,
'{0:.1f}$^o$'.format(ptmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size': self.text_size},
bbox={'facecolor': (.9, .9, 0), 'alpha': .25})
# print out the results for the strike angles
print('-----Period Range {0:.3g} to {1:.3g} (s)-----'.format(10**bb,
10**(bb + 1)))
print(' *PT Strike: median={0:.1f} mode={1:.1f} mean={2:.1f}'.format(
ptmedian,
ptmode,
ptmean))
if self.plot_tipper != 'y':
print('\n')
#--> set title of subplot
axh.set_title(self.title_dict[bb], fontdict=fd,
bbox={'facecolor': 'white', 'alpha': .25})
#--> set the title offset
axh.titleOffsetTrans._t = (0, .1)
# set tipper axes properties
elif aa == 1:
                        # x-limits span the full circle (0 to 2*pi in radians)
axh.set_xlim(0, 2 * np.pi)
# label plot with mode
tpmode = (90 - trhist[1][np.where(
trhist[0] == trhist[0].max())[0][0]]) % 360
tpmedian = (90 - np.median(tr[np.nonzero(tr)])) % 360
tpmean = (90 - np.mean(tr[np.nonzero(tr)])) % 360
axh.text(np.pi, axh.get_ylim()[1] * self.text_pad,
'{0:.1f}$^o$'.format(tpmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size': self.text_size},
bbox={'facecolor': (0, .1, .9), 'alpha': .25})
# print out statistics for strike angle
                        print(' *Tipper Strike: median={0:.1f} mode={1:.1f} mean={2:.1f}'.format(
                            tpmedian,
                            tpmode,
                            tpmean))
print('\n')
if nb > 5:
axh.set_title(self.title_dict[bb], fontdict=fd,
bbox={'facecolor': 'white', 'alpha': .25})
# set plot labels
if jj == 1:
if aa == 0:
axh.set_ylabel('PT Azimuth', fontdict=fd,
labelpad=self.font_size,
bbox={'facecolor': (.9, .9, 0),
'alpha': .25})
elif aa == 1:
axh.set_ylabel('Tipper Strike', fd,
labelpad=self.font_size,
bbox={'facecolor': (0, .1, .9),
'alpha': 0.25})
plt.setp(axh.yaxis.get_ticklabels(), visible=False)
            print('Note: North is assumed to be 0 and the strike angle is ' +
                  'measured clockwise positive.')
plt.show()
#------------------Plot strike angles for all period ranges------------
elif self.plot_type == 2:
# plot specs
plt.rcParams['figure.subplot.left'] = .07
plt.rcParams['figure.subplot.right'] = .98
plt.rcParams['figure.subplot.bottom'] = .100
plt.rcParams['figure.subplot.top'] = .88
plt.rcParams['figure.subplot.hspace'] = .3
plt.rcParams['figure.subplot.wspace'] = .2
self.fig = plt.figure(self.fig_num,
self.fig_size,
dpi=self.fig_dpi)
plt.clf()
# make subplots for invariants and phase tensor azimuths
if self.plot_tipper == 'n':
self.axhpt = self.fig.add_subplot(1, 1, 1, polar=True)
axlist = [self.axhpt]
else:
self.axhpt = self.fig.add_subplot(1, 2, 1, polar=True)
self.axhtip = self.fig.add_subplot(1, 2, 2, polar=True)
axlist = [self.axhpt, self.axhtip]
            # make a list of indices spanning the plotted period range
binlist = [pdict[ff] for ff in plist
if ff > 10**brange.min() and ff < 10**brange.max()]
# extract just the subset for each decade
gg = medpt[binlist, :]
# estimate the histogram for the decade for invariants and pt
pthist = np.histogram(gg[np.nonzero(gg)].flatten(),
bins=int(360/bw),
range=histrange)
# plot the histograms
self.barpt = self.axhpt.bar((pthist[1][:-1]) * np.pi / 180,
pthist[0],
width=bw * np.pi / 180)
# set color of pt from green (low) to orange (high count)
for cc, bar in enumerate(self.barpt):
fc = float(pthist[0][cc]) / pthist[0].max() * .8
bar.set_facecolor((fc, 1 - fc, 0))
# plot tipper if desired
if self.plot_tipper == 'y':
tr = self._medtp[binlist, :]
trhist = np.histogram(tr[np.nonzero(tr)].flatten(),
bins=int(360/bw),
range=histrange)
self.bartr = self.axhtip.bar((trhist[1][:-1]) * np.pi / 180,
trhist[0],
width=bw * np.pi / 180)
# set tipper color from dark blue (low) to light blue (high)
for cc, bar in enumerate(self.bartr):
try:
fc = float(trhist[0][cc]) / trhist[0].max() * .9
bar.set_facecolor((0, 1 - fc / 2, fc))
except ZeroDivisionError:
pass
# make axis look correct with N to the top at 90.
for aa, axh in enumerate(axlist):
# set major ticks to be every 30 degrees
axh.xaxis.set_major_locator(MultipleLocator(2 * np.pi / 12))
# set a light grid
axh.grid(alpha=0.25)
# set tick labels to be invisible
plt.setp(axh.yaxis.get_ticklabels(), visible=False)
# place the correct label at the cardinal directions
axh.xaxis.set_ticklabels(['', 'E', '', '',
'N', '', '',
'W', '', '',
'S', '', ''])
# set pt axes properties
if aa == 0:
axh.set_ylim(0, pthist[0].max())
ptmode = (90 - pthist[1][np.where(
pthist[0] == pthist[0].max())[0][0]]) % 360
ptmedian = (90 - np.median(gg[np.nonzero(gg)])) % 360
ptmean = (90 - np.mean(gg[np.nonzero(gg)])) % 360
axh.text(170 * np.pi / 180, axh.get_ylim()[1] * .65,
'{0:.1f}$^o$'.format(ptmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size': self.text_size},
bbox={'facecolor': (.9, .9, 0), 'alpha': 0.25})
# print results of strike analysis for pt
print('-----Period Range {0:.3g} to {1:.3g} (s)-----'.format(10**brange[0],
10**brange[-1]))
print(' *PT Strike: median={0:.1f} mode={1:.1f} mean={2:.1f}'.format(
ptmedian,
ptmode,
ptmean))
if self.plot_tipper != 'y':
print('\n')
axh.set_title('PT Azimuth', fontdict=fd,
bbox={'facecolor': (.9, .9, 0), 'alpha': 0.25})
# set tipper axes properties
                elif aa == 1:
axh.set_ylim(0, trhist[0].max())
tpmode = (90 - trhist[1][np.where(
trhist[0] == trhist[0].max())[0][0]]) % 360
tpmedian = (90 - np.median(tr[np.nonzero(tr)])) % 360
tpmean = (90 - np.mean(tr[np.nonzero(tr)])) % 360
axh.text(170 * np.pi / 180, axh.get_ylim()[1] * .65,
'{0:.1f}$^o$'.format(tpmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size': self.text_size},
bbox={'facecolor': (0, .1, .9), 'alpha': 0.25})
                    print(' *Tipper Strike: median={0:.1f} mode={1:.1f} mean={2:.1f}\n'.format(
tpmedian,
tpmode,
tpmean))
axh.set_title('Tipper Strike', fontdict=fd,
bbox={'facecolor': (0, .1, .9), 'alpha': 0.25})
# move title up a little to make room for labels
axh.titleOffsetTrans._t = (0, .15)
# remind the user what the assumptions of the strike angle are
print('Note: North is assumed to be 0 and the strike angle is ' +\
'measured clockwise positive.')
plt.show()
def save_plot(self, save_fn, file_format='pdf',
orientation='portrait', fig_dpi=None, close_plot='y'):
"""
save_plot will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_ResPhase.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotPhaseTensorMaps(edilist,freqspot=10)
>>> p1.save_plot(r'/home/MT', file_format='jpg')
'Figure saved to /home/MT/PTMaps/PTmap_phimin_10Hz.jpg'
"""
if fig_dpi is None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation)
# plt.clf()
# plt.close(self.fig)
else:
if not os.path.exists(save_fn):
os.mkdir(save_fn)
save_fn = os.path.join(save_fn, 'StrikeAnalysis_' + file_format)
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation)
if close_plot == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print('Saved figure to: ' + self.fig_fn)
def update_plot(self):
"""
        update any parameters that were changed using the built-in draw from
        canvas.
        Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
>>> [ax.grid(True, which='major') for ax in [p1.axr,p1.axp]]
>>> p1.update_plot()
"""
self.fig.canvas.draw()
def redraw_plot(self):
"""
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
>>> p1.xy_color = (.5,.5,.9)
>>> p1.xy_marker = '*'
>>> p1.redraw_plot()
"""
plt.close(self.fig)
self.plot()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return "Plots phase tensor maps for one freq"
def writeTextFiles(self, save_path=None):
"""
Saves the strike information as a text file.
"""
# check to see if the strikes have been calculated
try:
self.bin_width
except AttributeError:
self.plot()
# get the path to save the file to
if save_path is None:
try:
svpath = os.path.dirname(self.mt_list[0].fn)
except TypeError:
raise IOError('Need to input save_path, could not find path')
else:
svpath = save_path
# set
if self.fold == True:
histrange = (-180, 180)
elif self.fold == False:
histrange = (0, 360)
# set the bin width
bw = self.bin_width
slistinv = [['station']]
slistpt = [['station']]
slisttip = [['station']]
# calculate the strikes for the different period bands
for jj, bb in enumerate(self._brange):
tstr = self.title_dict[bb].replace('$', '')
tstr = tstr.replace('{', '').replace('}', '').replace('^', 'e')
tstr = tstr.replace('s', '(s)')
slistinv[0].append(tstr)
slistpt[0].append(tstr)
slisttip[0].append(tstr)
# calculate the strike for the different period bands per station
for kk, mt in enumerate(self.mt_list, 1):
if jj == 0:
slistinv.append([mt.station])
slistpt.append([mt.station])
slisttip.append([mt.station])
zinv = mt.Z.invariants
pt = mt.pt
tp = mt.Tipper
bnlist = []
for nn, per in enumerate(mt.period):
if per > 10**bb and per < 10**(bb + 1):
bnlist.append(nn)
#---> strike from invariants
zs = 90 - zinv.strike[bnlist]
# fold so the angle goes from 0 to 180
if self.fold == True:
# for plotting put the NW angles into the SE quadrant
zs[np.where(zs > 90)] = zs[np.where(zs > 90)] - 180
zs[np.where(zs < -90)] = zs[np.where(zs < -90)] + 180
# leave as the total unit circle 0 to 360
elif self.fold == False:
pass
zshist = np.histogram(zs[np.nonzero(zs)].flatten(),
bins=int(360/bw),
range=histrange)
#==============================================================
# For putting the values into a useful text file
# need to subtract 90 from the values to put them into
# coordinates where north is 0 and east is 90, which is
# different from plotting where east in the plotting function
# is equal to 0 and north is 90, measuring counter-clockwise
#==============================================================
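                # Worked example (illustrative only): a plotting-convention
                # angle of 120 deg (counter-clockwise from east) maps to
                # 90 - 120 = -30 deg, which wraps to -30 + 360 = 330 deg in
                # the geographic convention (clockwise from north).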
#==> compute mean
invmean = 90 - zs.mean()
if invmean < 0:
invmean += 360
invmed = 90 - np.median(zs)
#==> compute median
if invmed < 0:
invmed += 360
#==> compute mode
invmode = 90 - zshist[1][np.where(
zshist[0] == zshist[0].max())[0][0]]
if invmode < 0:
invmode += 360
#==> append to list
slistinv[kk].append((invmean,
invmed,
invmode))
#---> strike from phase tensor
az = pt.azimuth[0][bnlist]
# fold so the angle goes from 0 to 180
if self.fold == True:
az[np.where(az > 90)] = az[np.where(az > 90)] - 180
az[np.where(az < -90)] = az[np.where(az < -90)] + 180
# leave as the total unit circle 0 to 360
elif self.fold == False:
az[np.where(az < 0)] = az[np.where(az < 0)] + 360
# == > compute mean
ptmean1 = 90 - az.mean()
if ptmean1 < 0:
ptmean1 += 360
# == > compute median
ptmed1 = 90 - np.median(az)
if ptmed1 < 0:
ptmed1 += 360
# == > compute mode
azhist = np.histogram(az[np.nonzero(az)].flatten(),
bins=int(360/bw),
range=histrange)
ptmode1 = 90 - azhist[1][np.where(
azhist[0] == azhist[0].max())[0][0]]
if ptmode1 < 0:
ptmode1 += 360
slistpt[kk].append((ptmean1,
ptmed1,
ptmode1))
#---> strike from tipper
# needs to be negative because measures clockwise
if tp._Tipper.tipper is None:
tp._Tipper.tipper = np.zeros((len(mt.period), 1, 2),
dtype='complex')
tp.compute_components()
tipr = -tp.angle_real[bnlist]
# fold so the angle goes from 0 to 180
if self.fold == True:
tipr[np.where(tipr > 90)] = tipr[np.where(tipr > 90)] - 180
tipr[np.where(tipr < -90)
] = tipr[np.where(tipr < -90)] + 180
# leave as the total unit circle 0 to 360
elif self.fold == False:
tipr[np.where(tipr < 0)] = tipr[np.where(tipr < 0)] + 360
tphist = np.histogram(tipr[np.nonzero(tipr)].flatten(),
bins=int(360/bw),
range=histrange)
#==> compute mean
tpmean1 = 90 - tipr.mean()
if tpmean1 < 0:
tpmean1 += 360
#==> compute median
tpmed1 = 90 - np.median(tipr)
if tpmed1 < 0:
tpmed1 += 360
#==> compute mode
tpmode1 = 90 - tphist[1][np.where(
tphist[0] == tphist[0].max())[0][0]]
if tpmode1 < 0:
tpmode1 += 360
#--> append statistics to list
slisttip[kk].append((tpmean1,
tpmed1,
tpmode1))
            # make a list of indices for each decade
binlist = []
for ii, ff in enumerate(self._plist):
if ff > 10**bb and ff < 10**(bb + 1):
binlist.append(ii)
# extract just the subset for each decade
hh = self._medinv[binlist, :]
gg = self._medpt[binlist, :]
tr = self._medtp[binlist, :]
# estimate the histogram for the decade for invariants and pt
invhist = np.histogram(hh[np.nonzero(hh)].flatten(),
bins=int(360/bw),
range=histrange)
pthist = np.histogram(gg[np.nonzero(gg)].flatten(),
bins=int(360/bw),
range=histrange)
trhist = np.histogram(tr[np.nonzero(tr)].flatten(),
bins=int(360/bw),
range=histrange)
#--> include the row for mean, median and mode for each parameter
if jj == 0:
slistinv.append(['mean'])
slistinv.append(['median'])
slistinv.append(['mode'])
slistpt.append(['mean'])
slistpt.append(['median'])
slistpt.append(['mode'])
slisttip.append(['mean'])
slisttip.append(['median'])
slisttip.append(['mode'])
#--> compute mean, median and mode for invariants
# == > mean
imean = 90 - np.mean(hh[np.nonzero(hh)])
if imean < 0:
imean += 360
# == > median
imed = 90 - np.median(hh[np.nonzero(hh)])
if imed < 0:
imed += 360
# == > mode
imode = 90 - invhist[1][np.where(
invhist[0] == invhist[0].max())[0][0]]
if imode < 0:
imode += 360
#--> add them to the list of estimates
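            # after the station loop kk equals len(self.mt_list), so rows
            # kk + 1, kk + 2 and kk + 3 are the 'mean', 'median' and 'mode'
            # rows appended above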
slistinv[kk + 1].append(imean)
slistinv[kk + 2].append(imed)
slistinv[kk + 3].append(imode)
#--> compute pt statistics
# == > mean
ptmean = 90 - np.mean(gg[np.nonzero(gg)])
            if ptmean < 0:
                ptmean += 360
# == > median
ptmed = 90 - np.median(gg[np.nonzero(gg)])
if ptmed < 0:
ptmed += 360
# == > mode
ptmode = 90 - pthist[1][np.where(
pthist[0] == pthist[0].max())[0][0]]
if ptmode < 0:
ptmode += 360
#--> add the statistics to the parameter list
slistpt[kk + 1].append(ptmean)
slistpt[kk + 2].append(ptmed)
slistpt[kk + 3].append(ptmode)
#--> compute tipper statistics
# == > mean
tpmean = 90 - np.mean(tipr[np.nonzero(tipr)])
if tpmean < 0:
tpmean += 360
# == > median
tpmed = 90 - np.median(tipr[np.nonzero(tipr)])
if tpmed < 0:
tpmed += 360
# == > mode
tpmode = 90 - trhist[1][np.where(
trhist[0] == trhist[0].max())[0][0]]
if tpmode < 0:
tpmode += 360
#--> add the statistics to parameter list
slisttip[kk + 1].append(tpmean)
slisttip[kk + 2].append(tpmed)
slisttip[kk + 3].append(tpmode)
        invfid = open(os.path.join(svpath, 'Strike.invariants'), 'w')
        ptfid = open(os.path.join(svpath, 'Strike.pt'), 'w')
        tpfid = open(os.path.join(svpath, 'Strike.tipper'), 'w')
#---> write strike from the invariants
# == > mean
invfid.write('-' * 20 + 'MEAN' + '-' * 20 + '\n')
for ii, l1 in enumerate(slistinv):
for jj, l2 in enumerate(l1):
if ii == 0:
invfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
invfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[0])))
except IndexError:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
invfid.write('\n')
# == > median
invfid.write('-' * 20 + 'MEDIAN' + '-' * 20 + '\n')
for ii, l1 in enumerate(slistinv):
for jj, l2 in enumerate(l1):
if ii == 0:
invfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
invfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[1])))
except IndexError:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
invfid.write('\n')
# == > mode
invfid.write('-' * 20 + 'MODE' + '-' * 20 + '\n')
for ii, l1 in enumerate(slistinv):
for jj, l2 in enumerate(l1):
if ii == 0:
invfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
invfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[2])))
except IndexError:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
invfid.write('\n')
invfid.close()
#---> write the phase tensor text files
ptfid.write('-' * 20 + 'MEAN' + '-' * 20 + '\n')
for ii, l1 in enumerate(slistpt):
for jj, l2 in enumerate(l1):
if ii == 0:
ptfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
ptfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[0])))
except IndexError:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
ptfid.write('\n')
ptfid.write('-' * 20 + 'MEDIAN' + '-' * 20 + '\n')
for ii, l1 in enumerate(slistpt):
for jj, l2 in enumerate(l1):
if ii == 0:
ptfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
ptfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[1])))
except IndexError:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
ptfid.write('\n')
ptfid.write('-' * 20 + 'MODE' + '-' * 20 + '\n')
for ii, l1 in enumerate(slistpt):
for jj, l2 in enumerate(l1):
if ii == 0:
ptfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
ptfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[2])))
except IndexError:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
ptfid.write('\n')
ptfid.close()
#---> write the tipper text files
tpfid.write('-' * 20 + 'MEAN' + '-' * 20 + '\n')
for ii, l1 in enumerate(slisttip):
for jj, l2 in enumerate(l1):
if ii == 0:
tpfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
tpfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[0])))
except IndexError:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
tpfid.write('\n')
tpfid.write('-' * 20 + 'MEDIAN' + '-' * 20 + '\n')
for ii, l1 in enumerate(slisttip):
for jj, l2 in enumerate(l1):
if ii == 0:
tpfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
tpfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[1])))
except IndexError:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
tpfid.write('\n')
tpfid.write('-' * 20 + 'MODE' + '-' * 20 + '\n')
for ii, l1 in enumerate(slisttip):
for jj, l2 in enumerate(l1):
if ii == 0:
tpfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
tpfid.write('{0:>16}'.format(l2 + ' ' * 6))
else:
try:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[2])))
except IndexError:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
tpfid.write('\n')
tpfid.close()
|
gpl-3.0
|
vybstat/scikit-learn
|
examples/linear_model/plot_sparse_recovery.py
|
243
|
7461
|
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
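# Hedged illustration (not part of the original example): for an i.i.d.
# Gaussian design the mutual incoherence between the "relevant" and
# "irrelevant" columns is typically small, while correlated designs push it
# toward (or above) 1. The variable names below are illustrative only.
_rng_demo = np.random.RandomState(0)
_X_demo = _rng_demo.normal(size=(50, 10))
_mi_demo = mutual_incoherence(_X_demo[:, :3], _X_demo[:, 3:])
# print(_mi_demo)  # uncomment to inspect the value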
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and makes it
    # possible to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Stop the user warnings output - it is not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/arrays/categorical/test_constructors.py
|
1
|
25328
|
from datetime import datetime
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p16
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
class TestCategoricalConstructors:
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with pytest.raises(exp_err, match=exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with pytest.raises(exp_err, match=exp_msg):
Categorical.from_codes(
[0, 0, 1], categories=["a", "b", "c"], ordered=ordered
)
def test_constructor_empty(self):
# GH 17248
c = Categorical([])
expected = Index([])
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
expected = pd.Int64Index([1, 2, 3])
tm.assert_index_equal(c.categories, expected)
def test_constructor_empty_boolean(self):
# see gh-22702
cat = pd.Categorical([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array(
[
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
("a", "b"),
],
dtype=object,
)[:-1]
result = Categorical(values)
expected = Index(
[(Timestamp("2010-01-01"),), (Timestamp("2010-01-02"),)],
tupleize_cols=False,
)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype="O")
factor = Categorical(arr, ordered=False)
assert not factor.ordered
# this however will raise as cannot be sorted
msg = (
"'values' is not ordered, please explicitly specify the "
"categories order by passing in a categories argument."
)
with pytest.raises(TypeError, match=msg):
Categorical(arr, ordered=True)
def test_constructor_interval(self):
result = Categorical(
[Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True
)
ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
msg = "Categorical categories must be unique"
with pytest.raises(ValueError, match=msg):
Categorical([1, 2], [1, 2, 2])
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ["a", "b", "b"])
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = Categorical([np.nan, 1, 2.0, 3])
assert is_float_dtype(cat.categories)
cat = Categorical([np.nan, 1.0, 2.0, 3.0])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = Categorical(1)
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# two arrays
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5]) # noqa
        # the next ones are from the old docs
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical( # noqa
np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True
)
def test_constructor_with_existing_categories(self):
# GH25318: constructing with pd.Series used to bogusly skip recoding
# categories
c0 = Categorical(["a", "b", "c", "a"])
c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
c2 = Categorical(c0, categories=c1.categories)
tm.assert_categorical_equal(c1, c2)
c3 = Categorical(Series(c0), categories=c1.categories)
tm.assert_categorical_equal(c1, c3)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
msg = r"^Parameter 'categories' must be list-like, was"
with pytest.raises(TypeError, match=msg):
Categorical(["a", "b"], categories="a")
def test_constructor_with_null(self):
# Cannot have NaN in categories
msg = "Categorical categories cannot be null"
with pytest.raises(ValueError, match=msg):
Categorical([np.nan, "a", "b", "c"], categories=[np.nan, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical([None, "a", "b", "c"], categories=[None, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical(
DatetimeIndex(["nat", "20160101"]),
categories=[NaT, Timestamp("20160101")],
)
def test_constructor_with_index(self):
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(
ci.values, Categorical(ci.astype(object), categories=ci.categories)
)
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(range(3))
tm.assert_categorical_equal(cat, exp)
MultiIndex.from_product([range(5), ["a", "b", "c"]])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical([0, 1, 2], categories=range(3))
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize(
"dtl",
[
date_range("1995-01-01 00:00:00", periods=5, freq="s"),
date_range("1995-01-01 00:00:00", periods=5, freq="s", tz="US/Eastern"),
timedelta_range("1 day", periods=5, freq="s"),
],
)
def test_constructor_with_datetimelike(self, dtl):
# see gh-12077
# constructor with a datetimelike and NaT
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected._data.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8"))
# with NaT
s2 = s.copy()
s2.iloc[-1] = NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected._data.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert "NaT" in result
def test_constructor_from_index_series_datetimetz(self):
idx = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
idx = idx._with_freq(None) # freq not preserved in result.categories
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = timedelta_range("1 days", freq="D", periods=3)
idx = idx._with_freq(None) # freq not preserved in result.categories
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = period_range("2015-01-01", freq="D", periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
@pytest.mark.parametrize(
"values",
[
np.array([1.0, 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype="int64"),
["a", "b", "c", np.nan],
[pd.Period("2014-01"), pd.Period("2014-02"), NaT],
[Timestamp("2014-01-01"), Timestamp("2014-01-02"), NaT],
[
Timestamp("2014-01-01", tz="US/Eastern"),
Timestamp("2014-01-02", tz="US/Eastern"),
NaT,
],
],
)
def test_constructor_invariant(self, values):
# GH 14190
c = Categorical(values)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ["b", "a", "c"]
dtype = CategoricalDtype(categories, ordered=ordered)
result = Categorical(["a", "b", "a", "c"], dtype=dtype)
expected = Categorical(
["a", "b", "a", "c"], categories=categories, ordered=ordered
)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(["a", "b"], ordered=True)
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], categories=["a", "b"], dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=True, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=False, dtype=dtype)
@pytest.mark.parametrize("categories", [None, ["a", "b"], ["a", "c"]])
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(
["a", "b"], categories=categories, ordered=ordered, dtype="category"
)
expected = Categorical(["a", "b"], categories=categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
def test_constructor_np_strs(self):
        # GH#31499 Hashtable.map_locations needs to work on np.str_ objects
cat = pd.Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")])
assert all(isinstance(x, np.str_) for x in cat.categories)
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(["a", "b", "c"], ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "d"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_string(self):
values = Categorical(["a", "b", "d"])
# use categories, ordered
result = Categorical(
values, categories=["a", "b", "c"], ordered=True, dtype="category"
)
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_with_categorical_categories(self):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical(["a", "b"], categories=Categorical(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("klass", [lambda x: np.array(x, dtype=object), list])
def test_construction_with_null(self, klass, nulls_fixture):
# https://github.com/pandas-dev/pandas/issues/31927
values = klass(["a", nulls_fixture, "b"])
result = Categorical(values)
dtype = CategoricalDtype(["a", "b"])
codes = [0, -1, 1]
expected = Categorical.from_codes(codes=codes, dtype=dtype)
tm.assert_categorical_equal(result, expected)
def test_from_codes_empty(self):
cat = ["a", "b", "c"]
result = Categorical.from_codes([], categories=cat)
expected = Categorical([], categories=cat)
tm.assert_categorical_equal(result, expected)
def test_from_codes_too_few_categories(self):
dtype = CategoricalDtype(categories=[1, 2])
msg = "codes need to be between "
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], dtype=dtype)
def test_from_codes_non_int_codes(self):
dtype = CategoricalDtype(categories=[1, 2])
msg = "codes need to be array-like integers"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], dtype=dtype)
def test_from_codes_non_unique_categories(self):
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"])
def test_from_codes_nan_cat_included(self):
with pytest.raises(ValueError, match="Categorical categories cannot be null"):
Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan])
def test_from_codes_too_negative(self):
dtype = CategoricalDtype(categories=["a", "b", "c"])
msg = r"codes need to be between -1 and len\(categories\)-1"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], dtype=dtype)
def test_from_codes(self):
dtype = CategoricalDtype(categories=["a", "b", "c"])
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], categories=dtype.categories)
tm.assert_categorical_equal(exp, res)
res = Categorical.from_codes([0, 1, 2], dtype=dtype)
tm.assert_categorical_equal(exp, res)
@pytest.mark.parametrize("klass", [Categorical, CategoricalIndex])
def test_from_codes_with_categorical_categories(self, klass):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical.from_codes([0, 1], categories=klass(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("klass", [Categorical, CategoricalIndex])
def test_from_codes_with_non_unique_categorical_categories(self, klass):
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1], klass(["a", "b", "a"]))
def test_from_codes_with_nan_code(self):
# GH21767
codes = [1, 2, np.nan]
dtype = CategoricalDtype(categories=["a", "b", "c"])
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, categories=dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
@pytest.mark.parametrize("codes", [[1.0, 2.0, 0], [1.1, 2.0, 0]])
def test_from_codes_with_float(self, codes):
# GH21767
# float codes should raise even if values are equal to integers
dtype = CategoricalDtype(categories=["a", "b", "c"])
msg = "codes need to be array-like integers"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(codes, dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_dtype_raises(self):
msg = "Cannot specify"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], categories=["a", "b"], dtype=CategoricalDtype(["a", "b"])
)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], ordered=True, dtype=CategoricalDtype(["a", "b"])
)
def test_from_codes_neither(self):
msg = "Both were None"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1])
def test_from_codes_with_nullable_int(self):
codes = pd.array([0, 1], dtype="Int64")
categories = ["a", "b"]
result = Categorical.from_codes(codes, categories=categories)
expected = Categorical.from_codes(codes.to_numpy(int), categories=categories)
tm.assert_categorical_equal(result, expected)
def test_from_codes_with_nullable_int_na_raises(self):
codes = pd.array([0, None], dtype="Int64")
categories = ["a", "b"]
msg = "codes cannot contain NA values"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(codes, categories=categories)
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories(self, dtype):
cats = ["a", "b"]
codes = np.array([0, 0, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes(codes, cats)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories_sorts(self, dtype):
cats = ["b", "a"]
codes = np.array([0, 1, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes([1, 0, 0, 0], ["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_dtype(self):
cats = ["a", "b", "d"]
codes = np.array([0, 1, 0, 2], dtype="i8")
dtype = CategoricalDtype(["c", "b", "a"], ordered=True)
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical(
["a", "b", "a", "d"], categories=["c", "b", "a"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_coerces(self):
cats = ["1", "2", "bad"]
codes = np.array([0, 0, 1, 2], dtype="i8")
dtype = CategoricalDtype([1, 2])
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical([1, 1, 2, np.nan])
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("ordered", [None, True, False])
def test_construction_with_ordered(self, ordered):
# GH 9347, 9190
cat = Categorical([0, 1, 2], ordered=ordered)
assert cat.ordered == bool(ordered)
@pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
@pytest.mark.skipif(_np_version_under1p16, reason="Skipping for NumPy <1.16")
def test_constructor_string_and_tuples(self):
# GH 21416
c = pd.Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object))
expected_index = pd.Index([("a", "b"), ("b", "a"), "c"])
assert c.categories.equals(expected_index)
|
bsd-3-clause
|
fbagirov/scikit-learn
|
examples/tree/plot_tree_regression.py
|
206
|
1476
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
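# Hedged extension (not part of the original example): the overfitting
# described in the docstring can also be seen numerically by comparing the
# training errors of the two trees (the deeper tree also fits the noise).
from sklearn.metrics import mean_squared_error
print("train MSE, max_depth=2: %.3f" % mean_squared_error(y, regr_1.predict(X)))
print("train MSE, max_depth=5: %.3f" % mean_squared_error(y, regr_2.predict(X)))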
|
bsd-3-clause
|
taynaud/sparkit-learn
|
splearn/linear_model/logistic.py
|
2
|
6215
|
# encoding: utf-8
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import LogisticRegression
from ..utils.validation import check_rdd
from .base import SparkLinearModelMixin
class SparkLogisticRegression(LogisticRegression, SparkLinearModelMixin):
"""Distributed implementation of scikit-learn's Logistic classifier.
Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
# TODO: REVISIT!
# workaround to keep the classes parameter unchanged
@property
def classes_(self):
return self._classes_
@classes_.setter
def classes_(self, value):
pass
def fit(self, Z, classes=None):
"""Fit the model according to the given training data.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector
y - Target labels
classes : iterable
The set of available classes
Returns
-------
self : object
Returns self.
"""
check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
        # possible improvement: run partial_fit within partitions and then
        # average in a final reduce
self._classes_ = np.unique(classes)
return self._spark_fit(SparkLogisticRegression, Z)
def predict(self, X):
"""Distributed method to predict class labels for samples in X.
Parameters
----------
X : ArrayRDD containing {array-like, sparse matrix}
Samples.
Returns
-------
C : ArrayRDD
Predicted class label per sample.
"""
check_rdd(X, (sp.spmatrix, np.ndarray))
return self._spark_predict(SparkLogisticRegression, X)
def to_scikit(self):
m = self._to_scikit(LogisticRegression)
m.classes_ = self._classes_
return m
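# Hedged usage sketch (assumed workflow, not part of the original module).
# Z is expected to be a splearn DictRDD of (X, y) blocks and the prediction
# input an ArrayRDD of feature blocks; the construction details below are
# assumptions shown for illustration only.
#
#     from splearn.rdd import ArrayRDD, DictRDD          # assumed import path
#     Z = DictRDD((X_rdd, y_rdd), columns=('X', 'y'))     # assumed constructor
#     clf = SparkLogisticRegression(C=1.0)
#     clf.fit(Z, classes=np.unique(y_local))              # classes passed explicitly
#     y_pred = clf.predict(ArrayRDD(X_rdd))
#     local_model = clf.to_scikit()                       # plain scikit-learn estimator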
|
apache-2.0
|
fbagirov/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
228
|
11221
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
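# Added illustrative test (not in the original suite): a minimal in-memory
# dump/load round trip showing the basic API exercised in more detail above;
# the data values here are arbitrary examples.
def test_dump_load_roundtrip_minimal():
    X = np.array([[1.0, 0.0, 2.5], [0.0, 3.0, 0.0]])
    y = np.array([1, -1])
    f = BytesIO()
    dump_svmlight_file(X, y, f, zero_based=True)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=True)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)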
|
bsd-3-clause
|
maxplanck-ie/HiCExplorer
|
hicexplorer/hicPrepareQCreport.py
|
1
|
14401
|
#!/usr/bin/env python
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import argparse
import os
import errno
import matplotlib
import pandas as pd
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from hicexplorer._version import __version__
import logging
log = logging.getLogger(__name__)
def parse_arguments():
parser = argparse.ArgumentParser(description='Tabulates and plots QC measures from '
'hicBuildMatrix log files within an HTML output',
add_help=False,
usage='%(prog)s --logfiles matrix1_QCfolder/QC.log matrix2_QCfolder/QC.log '
'--labels "sample 1" "sample 2" --outputFolder QC_all_samples')
parserRequired = parser.add_argument_group('Required arguments')
# define the arguments
parserRequired.add_argument('--logfiles', '-l',
help='Path to the log files to be processed',
type=argparse.FileType('r'),
nargs="+",
required=True)
parserRequired.add_argument('--labels',
help='Label to assign to each log file. Each label should be separated by a space. Quote '
'labels that contain spaces: E.g. --labels label1 "labels 2"',
nargs="+")
parserRequired.add_argument('--outputFolder', '-o',
help='Several files will be saved under this folder: a table containing the results and '
'an HTML file with several images.',
required=True)
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--dpi',
help='Image resolution. By default, high-resolution PNG images are created at 200 dpi.',
type=int,
default=200)
parserOpt.add_argument("--help", "-h", action="help",
help="show this help message and exit")
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
def save_html(filename, unmap_table, discard_table, distance_table, orientation_table, all_table):
root = os.path.dirname(os.path.abspath(__file__))
html = open(os.path.join(root, "qc_template.html"), "r")
html_content = html.read()
# the html code has a placeholder for the html table
html_content = html_content.replace("%%TABLE_UNMAP%%", unmap_table.style
.format(lambda x: '{:,}'.format(x) if x > 1 else '{:.2%}'.format(x)).render())
html_content = html_content.replace("%%TABLE_DISCARDED%%", discard_table.style
.format(lambda x: '{:,}'.format(x) if x > 1 else '{:.2%}'.format(x)).render())
html_content = html_content.replace("%%TABLE_DISTANCE%%", distance_table.style
.format(lambda x: '{:,}'.format(x) if x > 1 else '{:.2%}'.format(x)).render())
html_content = html_content.replace("%%TABLE_ORIENTATION%%", orientation_table.style
.format(lambda x: '{:,}'.format(x) if x > 1 else '{:.2%}'.format(x)).render())
all_table = all_table[['Sequenced reads', 'Pairs mappable, unique and high quality', 'Hi-C contacts',
'One mate unmapped', 'One mate not unique', 'Low mapping quality', 'dangling end',
'self ligation (removed)', 'One mate not close to rest site', 'same fragment',
'self circle', 'duplicated pairs', 'inter chromosomal', 'Intra short range (< 20kb)',
'Intra long range (>= 20kb)', 'Read pair type: inward pairs', 'Read pair type: outward pairs', 'Read pair type: left pairs', 'Read pair type: right pairs']]
html_content = html_content.replace("%%TABLE%%", all_table.style.render())
with open(filename, 'w') as fh:
fh.write(html_content)
html.close()
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def make_figure_pairs_used(table, filename, dpi):
prc_table = table[[
'Hi-C contacts', 'Pairs mappable, unique and high quality', 'Sequenced reads']] / 1e6
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot(111)
prc_table.plot(kind='barh', ax=ax)
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_xlabel("Number of reads in millions")
ax.set_ylabel("")
plt.savefig(filename, bbox_extra_artists=(
lgd,), bbox_inches='tight', dpi=dpi)
def make_figure_umappable_non_unique_reads(table, filename, dpi):
prc_table = table[['Hi-C contacts', 'Low mapping quality', 'One mate not unique',
'One mate unmapped']].T / table['Sequenced reads']
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot(111)
prc_table.plot.bar(ax=ax)
labels = ax.get_xticklabels()
ax.set_xticklabels(labels, rotation=45, ha='right')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_ylabel("fraction w.r.t. pairs sequenced")
plt.savefig(filename, bbox_extra_artists=(
lgd,), bbox_inches='tight', dpi=dpi)
# merge the counts table with the percentages table
ret_table = table[['Hi-C contacts', 'Low mapping quality', 'One mate not unique',
'One mate unmapped']].join(prc_table.T, rsuffix='_%')
return ret_table[[u'Hi-C contacts', u'Hi-C contacts_%', u'Low mapping quality',
u'Low mapping quality_%', u'One mate not unique',
u'One mate not unique_%',
u'One mate unmapped', u'One mate unmapped_%']]
def make_figure_pairs_discarded(table, filename, dpi):
prc_table = table[['One mate not close to rest site', 'dangling end', 'duplicated pairs',
'same fragment', 'self circle',
'self ligation (removed)']].T / table['Pairs mappable, unique and high quality']
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot(111)
prc_table.plot.bar(ax=ax)
labels = ax.get_xticklabels()
ax.set_xticklabels(labels, rotation=45, ha='right')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_ylabel("fraction w.r.t. mappable and unique pairs")
plt.savefig(filename, bbox_extra_artists=(
lgd,), bbox_inches='tight', dpi=dpi)
# merge the counts table with the percentages table
ret_table = table[['One mate not close to rest site', 'dangling end', 'duplicated pairs',
'same fragment', 'self circle',
'self ligation (removed)']].join(prc_table.T, rsuffix=' %')
return ret_table[['One mate not close to rest site', 'One mate not close to rest site %',
'dangling end', 'dangling end %', 'duplicated pairs', 'duplicated pairs %',
'same fragment', 'same fragment %',
'self circle', 'self circle %', 'self ligation (removed)', 'self ligation (removed) %']]
def make_figure_distance(table, filename):
prc_table2 = table[['inter chromosomal',
'Intra short range (< 20kb)', 'Intra long range (>= 20kb)']].T / table['Hi-C contacts']
fig = plt.figure(figsize=(5, 4))
ax = fig.add_subplot(111)
prc_table2.plot.bar(ax=ax)
labels = ax.get_xticklabels()
ax.set_xticklabels(labels, rotation=45, ha='right')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_ylabel("fraction w.r.t. valid Hi-C contacts")
plt.savefig(filename, bbox_extra_artists=(
lgd,), bbox_inches='tight', dpi=200)
# merge the counts table with the percentages table
ret_table = table[['inter chromosomal', 'Intra short range (< 20kb)', 'Intra long range (>= 20kb)']].join(
prc_table2.T, rsuffix=' %')
return ret_table[['inter chromosomal', 'inter chromosomal %', 'Intra short range (< 20kb)', 'Intra short range (< 20kb) %', 'Intra long range (>= 20kb)', 'Intra long range (>= 20kb) %']]
def make_figure_read_orientation(table, filename, dpi):
_t = table[[u'Read pair type: inward pairs', u'Read pair type: outward pairs',
u'Read pair type: left pairs', u'Read pair type: right pairs']].T
prc_table3 = _t / _t.sum(axis=0)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_subplot(111)
prc_table3.plot.bar(ax=ax)
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_ylabel("fraction w.r.t. valid Hi-C contacts")
plt.savefig(filename, bbox_extra_artists=(
lgd,), bbox_inches='tight', dpi=dpi)
# merge the counts table with the percentages table
ret_table = table[[u'Read pair type: inward pairs', u'Read pair type: outward pairs',
u'Read pair type: left pairs', u'Read pair type: right pairs']].join(prc_table3.T, rsuffix=' %')
return ret_table[[u'Read pair type: inward pairs', u'Read pair type: inward pairs %', u'Read pair type: outward pairs', u'Read pair type: outward pairs %',
u'Read pair type: left pairs', u'Read pair type: left pairs %', u'Read pair type: right pairs', u'Read pair type: right pairs %']]
def main(args=None):
"""
The structure of the log file is as follows:
--------------------------------------------
File /tmp/test
Sequenced reads 99983
Min rest. site distance 150
Max rest. site distance 1500
# count (percentage w.r.t. total sequenced reads)
Pairs mappable, unique and high quality 52726 (52.73)
Hi-C contacts 36552 (36.56)
One mate unmapped 8777 (8.78)
One mate not unique 3603 (3.60)
Low mapping quality 34877 (34.88)
# count (percentage w.r.t. mappable, unique, high quality pairs)
dangling end 209 (0.40)
self ligation (removed) 5056 (9.59)
One mate not close to rest site 751 (1.42)
same fragment 10146 (19.24)
self circle 4274 (8.11)
duplicated pairs 12 (0.02)
# count (percentage w.r.t. total valid Hi-C contacts)
inter chromosomal 5849 (16.00)
Intra short range (< 20kb) 8689 (23.77)
Intra long range (>= 20kb) 22014 (60.23)
Read pair type: inward pairs 6977 (19.09)
Read pair type: outward pairs 9525 (26.06)
Read pair type: left pairs 7012 (19.18)
Read pair type: right pairs 7189 (19.67)
"""
args = parse_arguments().parse_args(args)
params = dict()
make_sure_path_exists(args.outputFolder)
for fh in args.logfiles:
in_log_part = False
log.debug('Processing {}\n'.format(fh.name))
for line in fh.readlines():
if line.startswith("File"):
in_log_part = True
if in_log_part is True:
if line.strip() == "" or line.startswith("#"):
continue
fields = line.strip().split("\t")
if len(fields) == 1:
continue
if fields[0] not in params:
params[fields[0]] = []
try:
params[fields[0]].append(int(fields[1]))
except ValueError:
params[fields[0]].append(fields[1])
table = pd.DataFrame(params)
if args.labels and len(args.labels) == len(args.logfiles):
try:
table['Labels'] = args.labels
except ValueError:
log.error("*ERROR* Some log files may not be valid. Please check that the log files contain "
"at the end the summary information.")
exit()
table = table.set_index('Labels')
else:
table = table.set_index('File')
if 'Pairs mappable, unique and high quality' not in table.columns:
table['Pairs mappable, unique and high quality'] = \
table['Sequenced reads'] - (table['One mate unmapped'] +
table['One mate not unique'] + table['Low mapping quality'])
if 'same fragment (800 bp)' in table.columns:
# older versions of the QC used the label 'same fragment (800 bp)'
table['same fragment'] = table['same fragment (800 bp)']
make_figure_pairs_used(table, args.outputFolder +
"/pairs_sequenced.png", args.dpi)
unmap_table = make_figure_umappable_non_unique_reads(table, args.outputFolder + "/unmappable_and_non_unique.png",
args.dpi)
discarded_table = make_figure_pairs_discarded(
table, args.outputFolder + "/pairs_discarded.png", args.dpi)
distance_table = make_figure_distance(
table, args.outputFolder + "/distance.png")
read_orientation_table = make_figure_read_orientation(
table, args.outputFolder + "/read_orientation.png", args.dpi)
save_html(args.outputFolder + "/hicQC.html", unmap_table, discarded_table, distance_table,
read_orientation_table, table)
unmap_table.to_csv(args.outputFolder + "/unmapable_table.txt", sep="\t")
discarded_table.to_csv(args.outputFolder +
"/discarded_table.txt", sep="\t")
distance_table.to_csv(args.outputFolder + "/distance_table.txt", sep="\t")
read_orientation_table.to_csv(
args.outputFolder + "/read_orientation_table.txt", sep="\t")
table.to_csv(args.outputFolder + "/QC_table.txt", sep="\t")
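# Added usage sketch (not part of the original script), mirroring the usage
# string defined in parse_arguments(); paths and labels are illustrative
# placeholders only:
#
#   hicPrepareQCreport --logfiles sample1_QCfolder/QC.log sample2_QCfolder/QC.log \
#       --labels "sample 1" "sample 2" --outputFolder QC_all_samples --dpi 200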
|
gpl-2.0
|
danche354/Sequence-Labeling
|
ner_BIOES/senna-pos-chunk-gazetteer-64-32-rmsprop5.py
|
1
|
7343
|
from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
from keras.optimizers import RMSprop
import numpy as np
import pandas as pd
import sys
import math
import os
from datetime import datetime
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
from tools import plot
np.random.seed(0)
# train hyperparameters
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
gazetteer_length = conf.gazetteer_length
emb_vocab = conf.senna_vocab
emb_length = conf.senna_length
hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length
output_length = conf.ner_BIOES_length
batch_size = conf.batch_size
nb_epoch = 70 #conf.nb_epoch
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
# the data, shuffled and split between train and test sets
train_data = load_data.load_ner(dataset='eng.train', form='BIOES')
dev_data = load_data.load_ner(dataset='eng.testa', form='BIOES')
train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()
word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None)
word_embedding = word_embedding.values
word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))])
embed_index_input = Input(shape=(step_length,))
embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input)
pos_input = Input(shape=(step_length, pos_length))
chunk_input = Input(shape=(step_length, chunk_length))
gazetteer_input = Input(shape=(step_length, gazetteer_length))
senna_pos_chunk_gazetteer_merge = merge([embedding, pos_input, chunk_input, gazetteer_input], mode='concat')
input_mask = Masking(mask_value=0)(senna_pos_chunk_gazetteer_merge)
dp_1 = Dropout(0.5)(input_mask)
hidden_1 = Bidirectional(LSTM(64, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(32, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
model = Model(input=[embed_index_input,pos_input,chunk_input, gazetteer_input], output=output)
rmsprop = RMSprop(lr=0.0005)
model.compile(loss='categorical_crossentropy',
optimizer=rmsprop,
metrics=['accuracy'])
print(model.summary())
number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))
print('start train %s ...\n'%model_name)
best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []
log = open('%s/model_log.txt'%folder_path, 'w')
start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
for epoch in range(nb_epoch):
start = datetime.now()
print('-'*60)
print('epoch %d start at %s'%(epoch, str(start)))
log.write('-'*60+'\n')
log.write('epoch %d start at %s\n'%(epoch, str(start)))
train_loss = 0
dev_loss = 0
np.random.shuffle(train_data)
for i in range(number_of_train_batches):
train_batch = train_data[i*batch_size: (i+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=train_batch, form='BIOES', gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
gazetteer, length_2 = prepare.prepare_gazetteer(batch=train_batch)
gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
train_metrics = model.train_on_batch([embed_index, pos, chunk, gazetteer], y)
train_loss += train_metrics[0]
all_train_loss.append(train_loss)
correct_predict = 0
all_predict = 0
for j in range(number_of_dev_batches):
dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=dev_batch, form='BIOES', gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
gazetteer, length_2 = prepare.prepare_gazetteer(batch=dev_batch)
gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
# for loss
dev_metrics = model.test_on_batch([embed_index, pos, chunk, gazetteer], y)
dev_loss += dev_metrics[0]
# for accuracy
prob = model.predict_on_batch([embed_index, pos, chunk, gazetteer])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
correct_predict += np.sum(predict_label[:l]==label[i][:l])
all_predict += np.sum(length)
epoch_accuracy = float(correct_predict)/all_predict
all_dev_accuracy.append(epoch_accuracy)
all_dev_loss.append(dev_loss)
if epoch_accuracy >= best_accuracy:
best_accuracy = epoch_accuracy
best_epoch = epoch
end = datetime.now()
model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)
print('epoch %d end at %s'%(epoch, str(end)))
print('epoch %d train loss: %f'%(epoch, train_loss))
print('epoch %d dev loss: %f'%(epoch, dev_loss))
print('epoch %d dev accuracy: %f'%(epoch, epoch_accuracy))
print('best epoch now: %d\n'%best_epoch)
log.write('epoch %d end at %s\n'%(epoch, str(end)))
log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
log.write('epoch %d dev accuracy: %f\n'%(epoch, epoch_accuracy))
log.write('best epoch now: %d\n\n'%best_epoch)
end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))
timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
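# Added usage sketch (not part of the original script): the best epoch's
# weights saved above can be reloaded later for inference; the feature
# arrays below stand for batches prepared exactly as in the loops above.
#
#   from keras.models import load_model
#   best_model = load_model('%s/model_epoch_%d.h5' % (folder_path, best_epoch))
#   prob = best_model.predict_on_batch([embed_index, pos, chunk, gazetteer])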
|
mit
|
Dioptas/pymatgen
|
pymatgen/io/abinitio/abiinspect.py
|
2
|
17004
|
# coding: utf-8
"""
This module provides objects to inspect the status of the Abinit tasks at run-time.
by extracting information from the main output file (text format).
"""
from __future__ import unicode_literals, division, print_function
import collections
import numpy as np
import yaml
import six
from six.moves import cStringIO, map, zip
from prettytable import PrettyTable
from pymatgen.util.plotting_utils import add_fig_kwargs
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
def _magic_parser(stream, magic):
"""
Parse the section with the SCF cycle
Returns:
dict where the keys are the column names and the values are lists of
numbers. None if no section was found.
.. warning::
The parser is very fragile and should be replaced by YAML.
"""
#Example (SCF cycle, similar format is used for phonons):
#
# iter Etot(hartree) deltaE(h) residm vres2
# ETOT 1 -8.8604027880849 -8.860E+00 2.458E-02 3.748E+00
# At SCF step 5 vres2 = 3.53E-08 < tolvrs= 1.00E-06 =>converged.
in_doc, fields = 0, None
for line in stream:
line = line.strip()
if line.startswith(magic):
keys = line.split()
fields = collections.OrderedDict((k, []) for k in keys)
if fields is not None:
#print(line)
in_doc += 1
if in_doc == 1:
continue
# End of the section.
if not line: break
tokens = list(map(float, line.split()[1:]))
assert len(tokens) == len(keys)
for l, v in zip(fields.values(), tokens):
l.append(v)
return fields
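# Added usage sketch (not part of the original module). With a header line
# and a ``magic`` string chosen so that they agree, the parser returns one
# list of floats per column, e.g.:
#
#   text = ("iter   Etot(hartree)     deltaE(h)  residm     vres2\n"
#           "ETOT  1  -8.8604027880849 -8.860E+00 2.458E-02  3.748E+00\n"
#           "\n")
#   fields = _magic_parser(cStringIO(text), magic="iter   Etot(hartree)")
#   # fields -> OrderedDict([('iter', [1.0]), ('Etot(hartree)', [-8.8604...]),
#   #                        ('deltaE(h)', [-8.86]), ('residm', [0.02458]),
#   #                        ('vres2', [3.748])])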
def plottable_from_outfile(filepath):
"""
Factory function that returns a plottable object by inspecting the main output file of abinit
Returns None if it is not able to detect the class to instantiate.
"""
# TODO
# Figure out how to detect the type of calculations
# without having to parse the input. Possible approach: YAML doc
#with YamlTokenizer(filepath) as r:
# doc = r.next_doc_with_tag("!CalculationType")
# d = yaml.load(doc.text_notag)
# calc_type = d["calculation_type"]
#ctype2class = {
# "Ground State": GroundStateScfCycle,
# "Phonon": PhononScfCycle,
# "Relaxation": Relaxation,
#}
#obj = ctype2class.get(calc_type, None)
obj = GroundStateScfCycle
if obj is not None:
return obj.from_file(filepath)
else:
return None
class ScfCycle(collections.Mapping):
"""
It essentially consists of a dictionary mapping string
to list of floats containing the data at the different iterations.
"""
def __init__(self, fields):
self.fields = fields
#print(fields)
all_lens = [len(lst) for lst in self.values()]
self.num_iterations = all_lens[0]
assert all(n == self.num_iterations for n in all_lens)
def __getitem__(self, slice):
return self.fields.__getitem__(slice)
def __iter__(self):
return self.fields.__iter__()
def __len__(self):
return len(self.fields)
def __str__(self):
"""String representation."""
table = PrettyTable([list(self.fields.keys())])
for it in range(self.num_iterations):
row = list(map(str, (self[k][it] for k in self.keys())))
table.add_row(row)
stream = cStringIO()
print(table, file=stream)
stream.seek(0)
return "".join(stream)
@property
def last_iteration(self):
"""Returns a dictionary with the values of the last iteration."""
return {k: v[-1] for k, v in self.items()}
@classmethod
def from_file(cls, filepath):
"""Read the first occurrence of ScfCycle from file."""
with open(filepath, "r") as stream:
return cls.from_stream(stream)
@classmethod
def from_stream(cls, stream):
"""
Read the first occurrence of ScfCycle from stream.
Returns:
None if no `ScfCycle` entry is found.
"""
fields = _magic_parser(stream, magic=cls.MAGIC)
if fields:
fields.pop("iter")
return cls(fields)
else:
return None
@add_fig_kwargs
def plot(self, **kwargs):
"""
Uses matplotlib to plot the evolution of the SCF cycle. Returns the `matplotlib` figure.
"""
import matplotlib.pyplot as plt
# Build grid of plots.
num_plots, ncols, nrows = len(self), 1, 1
if num_plots > 1:
ncols = 2
nrows = (num_plots//ncols) + (num_plots % ncols)
fig, ax_list = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, squeeze=False)
ax_list = ax_list.ravel()
iter_num = np.array(list(range(self.num_iterations)))
for ((key, values), ax) in zip(self.items(), ax_list):
ax.grid(True)
ax.set_xlabel('Iteration')
ax.set_xticks(iter_num, minor=False)
ax.set_ylabel(key)
xx, yy = iter_num, values
if self.num_iterations > 1:
# Don't show the first iteration since it's not very useful.
xx, yy = xx[1:] + 1, values[1:]
#print("xx ",xx, "yy ",yy)
ax.plot(xx, yy, "-o", lw=2.0)
# Get around a bug in matplotlib.
if (num_plots % ncols) != 0:
ax_list[-1].plot(xx, yy, lw=0.0)
ax_list[-1].axis('off')
return fig
class GroundStateScfCycle(ScfCycle):
"""Result of the Ground State self-consistent cycle."""
#yaml_tag = '!GroundStateScfCycle'
MAGIC = "iter Etot(hartree)"
@property
def last_etotal(self):
"""The total energy at the last iteration."""
return self["Etot(hartree)"][-1]
class PhononScfCycle(ScfCycle):
"""Result of the Phonon self-consistent cycle."""
#yaml_tag = '!PhononScfCycle'
MAGIC = "iter 2DEtotal(Ha)"
@property
def last_etotal(self):
"""The 2-nd order derivative of the energy at the last iteration."""
return self["2DEtotal(Ha)"][-1]
class Relaxation(collections.Iterable):
"""
A list of :class:`GroundStateScfCycle` objects.
.. note::
Forces, stresses and crystal structures are missing.
Solving this problem would require the standardization
of the Abinit output file (YAML).
"""
def __init__(self, cycles):
self.cycles = cycles
def __iter__(self):
return self.cycles.__iter__()
def __len__(self):
return self.cycles.__len__()
def __str__(self):
"""String representation."""
lines = []
app = lines.append
for i, cycle in enumerate(self):
app("")
app("RELAXATION STEP: %d" % i)
app(str(cycle))
app("")
return "\n".join(lines)
@classmethod
def from_file(cls, filepath):
"""Initialize the object from the Abinit main output file."""
with open(filepath, "r") as stream:
return cls.from_stream(stream)
@classmethod
def from_stream(cls, stream):
"""
Extract data from stream. Returns None if some error occurred.
"""
cycles = []
while True:
scf_cycle = GroundStateScfCycle.from_stream(stream)
if scf_cycle is None: break
cycles.append(scf_cycle)
return cls(cycles) if cycles else None
@property
def history(self):
"""
Dictionary of lists with the evolution of the data as function of the relaxation step.
"""
try:
return self._history
except AttributeError:
self._history = history = collections.defaultdict(list)
for cycle in self:
d = cycle.last_iteration
for k, v in d.items():
history[k].append(v)
return self._history
@add_fig_kwargs
def plot(self, **kwargs):
"""
Uses matplotlib to plot the evolution of the structural relaxation.
Returns:
`matplotlib` figure
"""
import matplotlib.pyplot as plt
history = self.history
#print(history)
relax_step = list(range(len(self)))
# Build grid of plots.
num_plots, ncols, nrows = len(list(history.keys())), 1, 1
if num_plots > 1:
ncols = 2
nrows = (num_plots//ncols) + (num_plots % ncols)
fig, ax_list = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, squeeze=False)
ax_list = ax_list.ravel()
if (num_plots % ncols) != 0:
ax_list[-1].axis('off')
for (key, values), ax in zip(history.items(), ax_list):
ax.grid(True)
ax.set_xlabel('Relaxation Step')
ax.set_xticks(relax_step, minor=False)
ax.set_ylabel(key)
ax.plot(relax_step, values, "-o", lw=2.0)
return fig
# TODO
#class HaydockIterations(collections.Iterable):
# """This object collects info on the different steps of the Haydock technique used in the Bethe-Salpeter code"""
# @classmethod
# def from_file(cls, filepath):
# """Initialize the object from file."""
# with open(filepath, "r") as stream:
# return cls.from_stream(stream)
#
# @classmethod
# def from_stream(cls, stream):
# """Extract data from stream. Returns None if some error occurred."""
# cycles = []
# while True:
# scf_cycle = GroundStateScfCycle.from_stream(stream)
# if scf_cycle is None: break
# cycles.append(scf_cycle)
#
# return cls(cycles) if cycles else None
#
# #def __init__(self):
#
# def plot(self, **kwargs):
# """
# Uses matplotlib to plot the evolution of the structural relaxation.
# ============== ==============================================================
# kwargs Meaning
# ============== ==============================================================
# title Title of the plot (Default: None).
# show           True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps'* to save the figure to a file.
# ============== ==============================================================
# Returns:
# `matplotlib` figure
# """
# import matplotlib.pyplot as plt
# title = kwargs.pop("title", None)
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# if title: fig.suptitle(title)
# if savefig is not None: fig.savefig(savefig)
# if show: plt.show()
# return fig
class YamlTokenizerError(Exception):
"""Exceptions raised by :class:`YamlTokenizer`."""
class YamlTokenizer(collections.Iterator):
"""
Provides context-manager support so you can use it in a with statement.
"""
Error = YamlTokenizerError
def __init__(self, filename):
# The position inside the file.
self.linepos = 0
self.stream = open(filename, "r")
def __iter__(self):
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
self.close()
def close(self):
try:
self.stream.close()
except:
print("Exception in YAMLTokenizer.close()")
print(straceback())
def seek(self, offset, whence=0):
"""
seek(offset[, whence]) -> None. Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
0 (offset from start of file, offset should be >= 0); other values are 1
(move relative to current position, positive or negative), and 2 (move
relative to end of file, usually negative, although many platforms allow
seeking beyond the end of a file). If the file is opened in text mode,
only offsets returned by tell() are legal. Use of other offsets causes
undefined behavior.
Note that not all file objects are seekable.
"""
assert offset == 0
self.linepos = 0
return self.stream.seek(offset, whence)
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
"""
Returns the first YAML document in stream.
.. warning::
Assume that the YAML documents are closed explicitly with the sentinel '...'
"""
in_doc, lines, doc_tag = None, [], None
for i, line in enumerate(self.stream):
self.linepos += 1
#print(i, line)
if line.startswith("---"):
# Include only lines in the form:
# "--- !tag"
# "---"
# Other lines are spurious.
in_doc = False
l = line[3:].strip().lstrip()
if l.startswith("!"):
# "--- !tag"
doc_tag = l
in_doc = True
elif not l:
# "---"
in_doc = True
doc_tag = None
if in_doc:
lineno = self.linepos
if in_doc:
lines.append(line)
if in_doc and line.startswith("..."):
return YamlDoc(text="".join(lines), lineno=lineno, tag=doc_tag)
raise StopIteration("Cannot find next YAML document")
def all_yaml_docs(self):
"""
Returns a list with all the YAML docs found in stream.
Seek the stream before returning.
.. warning::
Assume that all the YAML docs (with the exception of the last one)
are closed explicitly with the sentinel '...'
"""
docs = [doc for doc in self]
self.seek(0)
return docs
def next_doc_with_tag(self, doc_tag):
"""
Returns the next document with the specified tag. Raises StopIteration if no such document is found.
"""
while True:
try:
doc = six.advance_iterator(self)
if doc.tag == doc_tag:
return doc
except StopIteration:
raise
def all_docs_with_tag(self, doc_tag):
"""
Returns all the documents with the specified tag.
"""
docs = []
while True:
try:
doc = self.next_doc_with_tag(doc_tag)
docs.append(doc)
except StopIteration:
break
self.seek(0)
return docs
def yaml_read_kpoints(filename, doc_tag="!Kpoints"):
"""Read the K-points from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.load(doc.text_notag)
return np.array(d["reduced_coordinates_of_qpoints"])
def yaml_read_irred_perts(filename, doc_tag="!IrredPerts"):
"""Read the list of irreducible perturbations from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.load(doc.text_notag)
return d["irred_perts"]
class YamlDoc(object):
"""
Handy object that stores that YAML document, its main tag and the
position inside the file.
"""
__slots__ = [
"text",
"lineno",
"tag",
]
def __init__(self, text, lineno, tag=None):
"""
Args:
text: String with the YAML document.
lineno: The line number where the document is located.
tag: The YAML tag associate to the document.
"""
# Sanitize strings: use "ignore" to skip invalid characters in .encode/.decode
if isinstance(text, bytes):
text = text.decode("utf-8", "ignore")
text = text.rstrip().lstrip()
self.text = text
self.lineno = lineno
if isinstance(tag, bytes):
tag = tag.decode("utf-8", "ignore")
self.tag = tag
def __str__(self):
return self.text
def __eq__(self, other):
if other is None: return False
return (self.text == other.text and
self.lineno == other.lineno and
self.tag == other.tag)
def __ne__(self, other):
return not self == other
@property
def text_notag(self):
"""
Returns the YAML text without the tag.
Useful if we don't have any constructor registered for the tag
(we used the tag just to locate the document).
"""
if self.tag is not None:
return self.text.replace(self.tag, "")
else:
return self.text
|
mit
|
rspavel/spack
|
var/spack/repos/builtin/packages/qmcpack/package.py
|
2
|
16625
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import llnl.util.tty as tty
class Qmcpack(CMakePackage, CudaPackage):
"""QMCPACK, is a modern high-performance open-source Quantum Monte
Carlo (QMC) simulation code."""
# Package information
homepage = "http://www.qmcpack.org/"
git = "https://github.com/QMCPACK/qmcpack.git"
maintainers = ['naromero77']
tags = ['ecp', 'ecp-apps']
# This download method is untrusted, and is not recommended by the
# Spack manual. However, it is easier to maintain because github hashes
# can occasionally change.
# NOTE: 12/19/2017 QMCPACK 3.0.0 does not build properly with Spack.
version('develop')
version('3.9.2', tag='v3.9.2')
version('3.9.1', tag='v3.9.1')
version('3.9.0', tag='v3.9.0')
version('3.8.0', tag='v3.8.0')
version('3.7.0', tag='v3.7.0')
version('3.6.0', tag='v3.6.0')
version('3.5.0', tag='v3.5.0')
version('3.4.0', tag='v3.4.0')
version('3.3.0', tag='v3.3.0')
version('3.2.0', tag='v3.2.0')
version('3.1.1', tag='v3.1.1')
version('3.1.0', tag='v3.1.0')
# These defaults match those in the QMCPACK manual
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo'))
variant('mpi', default=True, description='Build with MPI support')
variant('phdf5', default=True, description='Build with parallel collective I/O')
variant('complex', default=False,
description='Build the complex (general twist/k-point) version')
variant('mixed', default=False,
description='Build the mixed precision (mixture of single and '
'double precision) version')
variant('soa', default=True,
description='Build with Structure-of-Array instead of '
'Array-of-Structure code. Only for CPU code')
variant('timers', default=False,
description='Build with support for timers')
variant('da', default=False,
description='Install with support for basic data analysis tools')
variant('gui', default=False,
description='Install with Matplotlib (long installation time)')
variant('afqmc', default=False,
description='Install with AFQMC support. NOTE that if used in '
'combination with CUDA, only AFQMC will have CUDA.')
variant('ppconvert', default=False,
description='Install with pseudopotential converter.')
# Notes about CUDA-centric peculiarities:
#
# cuda variant implies mixed precision variant by default, but there is
# no way to express this in variant syntax, need something like
# variant('+mixed', default=True, when='+cuda', description="...")
#
# cuda+afqmc variant will not build the legacy CUDA code in real-space
# QMCPACK. This is due to a conflict in the build system. This is not
# worth fixing since the legacy CUDA code, will be superseded
# by the OpenMP 4.5 code.
# high-level variant conflicts
conflicts(
'+phdf5',
when='~mpi',
msg='Parallel collective I/O requires MPI-enabled QMCPACK. '
'Please add "~phdf5" to the Spack install line for serial QMCPACK.')
conflicts(
'+soa',
when='+cuda@:3.4.0',
msg='QMCPACK CUDA+SOA variant does not exist prior to v. 3.5.0.')
conflicts('^openblas+ilp64',
msg='QMCPACK does not support OpenBLAS 64-bit integer variant')
# Omitted for now due to concretizer bug
# conflicts('^intel-mkl+ilp64',
# msg='QMCPACK does not support MKL 64-bit integer variant')
# QMCPACK 3.6.0 or later requires support for C++14
compiler_warning = 'QMCPACK 3.6.0 or later requires a ' \
'compiler with support for C++14'
conflicts('%gcc@:4', when='@3.6.0:', msg=compiler_warning)
conflicts('%intel@:17', when='@3.6.0:', msg=compiler_warning)
conflicts('%pgi@:17', when='@3.6.0:', msg=compiler_warning)
conflicts('%llvm@:3.4', when='@3.6.0:', msg=compiler_warning)
conflicts('+afqmc', when='@:3.6.0', msg='AFQMC not recommended before v3.7')
conflicts('+afqmc', when='~mpi', msg='AFQMC requires building with +mpi')
conflicts('+afqmc', when='%gcc@:6.0', msg='AFQMC code requires [email protected] or greater')
conflicts('+afqmc', when='%apple-clang@:9.2', msg='AFQMC code requires clang 4.1 or greater')
conflicts('+afqmc', when='%clang@:4.0', msg='AFQMC code requires clang 4.1 or greater')
conflicts('+afqmc', when='%intel@:18', msg='AFQMC code requires intel19 or greater')
# Prior to QMCPACK 3.5.0 Intel MKL was not properly detected with
# non-Intel compilers without a Spack-based hack. This hack
# had the potential for negative side effects and led to more
# complex Python code that would have been difficult to maintain
# long term. Note that this has not been an issue since QMCPACK 3.5.0.
# For older versions of QMCPACK, we issue a conflict below if you
# try to use Intel MKL with a non-Intel compiler.
mkl_warning = 'QMCPACK releases prior to 3.5.0 require the ' \
'Intel compiler when linking against Intel MKL'
conflicts('%gcc', when='@:3.4.0 ^intel-mkl', msg=mkl_warning)
conflicts('%pgi', when='@:3.4.0 ^intel-mkl', msg=mkl_warning)
conflicts('%llvm', when='@:3.4.0 ^intel-mkl', msg=mkl_warning)
# Dependencies match those in the QMCPACK manual.
# FIXME: once concretizer can unite unconditional and conditional
# dependencies, some of the '~mpi' variants below will not be necessary.
# Essential libraries
depends_on('[email protected]:', when='@:3.5.0', type='build')
depends_on('[email protected]:', when='@3.6.0:', type='build')
depends_on('boost')
depends_on('[email protected]:', when='@3.6.0:')
depends_on('libxml2')
depends_on('mpi', when='+mpi')
depends_on('python@3:', when='@3.9:')
# HDF5
depends_on('hdf5~mpi', when='~phdf5')
depends_on('hdf5+mpi', when='+phdf5')
# Math libraries
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api@3')
# qmcpack data analysis tools
# basic command line tool based on Python and NumPy
# It may be necessary to disable blas and lapack when building the
# 'py-numpy' package, but there should not be a hard dependency on the
# 'py-numpy~blas~lapack' variant
depends_on('py-numpy', when='+da', type='run')
# GUI is optional for data analysis
# py-matplotlib leads to a long complex DAG for dependencies
depends_on('py-matplotlib', when='+gui', type='run')
# Backport several patches from recent versions of QMCPACK
# The test_numerics unit test is broken prior to QMCPACK 3.3.0
patch_url = 'https://patch-diff.githubusercontent.com/raw/QMCPACK/qmcpack/pull/621.patch'
patch_checksum = 'e2ff7a6f0f006856085d4aab6d31f32f16353e41f760a33a7ef75f3ecce6a5d6'
patch(patch_url, sha256=patch_checksum, when='@3.1.0:3.3.0')
# FindMKL.cmake has an issues prior to QMCPACK 3.3.0
patch_url = 'https://patch-diff.githubusercontent.com/raw/QMCPACK/qmcpack/pull/623.patch'
patch_checksum = '3eb9dec05fd1a544318ff84cd8b5926cfc6b46b375c7f3b012ccf0b50cf617b7'
patch(patch_url, sha256=patch_checksum, when='@3.1.0:3.3.0')
# git-rev files for non-git builds had issues prior to QMCPACK 3.3.0
patch_url = 'https://patch-diff.githubusercontent.com/raw/QMCPACK/qmcpack/pull/643.patch'
patch_checksum = 'c066c79901a612cf8848135e0d544efb114534cca70b90bfccc8ed989d3d9dde'
patch(patch_url, sha256=patch_checksum, when='@3.1.0:3.3.0')
# the default flag_handler for Spack causes problems for QMCPACK
# https://spack.readthedocs.io/en/latest/packaging_guide.html#the-build-environment:
flag_handler = CMakePackage.build_system_flags
@when('@:3.7.0')
def patch(self):
# FindLibxml2QMC.cmake doesn't check the environment by default
# for libxml2, so we fix that.
filter_file(r'$ENV{LIBXML2_HOME}/lib',
'${LIBXML2_HOME}/lib $ENV{LIBXML2_HOME}/lib',
'CMake/FindLibxml2QMC.cmake')
@property
def build_targets(self):
spec = self.spec
targets = ['all']
if '+ppconvert' in spec:
targets.append('ppconvert')
return targets
def cmake_args(self):
spec = self.spec
args = []
# This issue appears specifically with the Intel compiler,
# but may be an issue with other compilers as well. The final fix
# probably needs to go into QMCPACK's CMake instead of in Spack.
# QMCPACK binaries are linked with the C++ compiler, but *may* contain
# Fortran libraries such as NETLIB-LAPACK and OpenBLAS on the link
# line. For the case of the Intel C++ compiler, we need to manually
# add a library from the Intel Fortran compiler.
if '%intel' in spec:
args.append('-DQMC_EXTRA_LIBS=-lifcore')
# Currently FFTW_HOME and LIBXML2_HOME are used by CMake.
# Any CMake warnings about other variables are benign.
# Starting with QMCPACK 3.8.0, CMake uses the builtin find(libxml2)
# function
if spec.satisfies('@:3.7.0'):
xml2_prefix = spec['libxml2'].prefix
args.append('-DLIBXML2_HOME={0}'.format(xml2_prefix))
args.append(
'-DLibxml2_INCLUDE_DIRS={0}'.format(xml2_prefix.include))
args.append('-DLibxml2_LIBRARY_DIRS={0}'.format(xml2_prefix.lib))
if '^fftw@3:' in spec:
fftw_prefix = spec['fftw'].prefix
args.append('-DFFTW_HOME={0}'.format(fftw_prefix))
args.append('-DFFTW_INCLUDE_DIRS={0}'.format(fftw_prefix.include))
args.append('-DFFTW_LIBRARY_DIRS={0}'.format(fftw_prefix.lib))
args.append('-DBOOST_ROOT={0}'.format(self.spec['boost'].prefix))
args.append('-DHDF5_ROOT={0}'.format(self.spec['hdf5'].prefix))
# Default is MPI, serial version is convenient for cases, e.g. laptops
if '+mpi' in spec:
args.append('-DQMC_MPI=1')
else:
args.append('-DQMC_MPI=0')
# Default is parallel collective I/O enabled
if '+phdf5' in spec:
args.append('-DENABLE_PHDF5=1')
else:
args.append('-DENABLE_PHDF5=0')
# Default is real-valued single particle orbitals
if '+complex' in spec:
args.append('-DQMC_COMPLEX=1')
else:
args.append('-DQMC_COMPLEX=0')
if '+afqmc' in spec:
args.append('-DBUILD_AFQMC=1')
else:
args.append('-DBUILD_AFQMC=0')
# When '-DQMC_CUDA=1', CMake automatically sets:
# '-DQMC_MIXED_PRECISION=1'
#
# There is a double-precision CUDA path, but it is not as well
# tested.
if '+cuda' in spec:
# Cannot support both CUDA builds at the same time, see
# earlier notes in this package.
if '+afqmc' in spec:
args.append('-DENABLE_CUDA=1')
else:
args.append('-DQMC_CUDA=1')
cuda_arch_list = spec.variants['cuda_arch'].value
cuda_arch = cuda_arch_list[0]
if len(cuda_arch_list) > 1:
raise InstallError(
'QMCPACK only supports compilation for a single '
'GPU architecture at a time'
)
if cuda_arch != 'none':
args.append('-DCUDA_ARCH=sm_{0}'.format(cuda_arch))
else:
# This is the default value set in QMCPACK's CMake
# Not possible to set default value for cuda_arch,
# thus this won't be stored in the spec, which is
# a problem.
args.append('-DCUDA_ARCH=sm_35')
else:
args.append('-DQMC_CUDA=0')
# Mixed-precision versus double-precision CPU and GPU code
if '+mixed' in spec:
args.append('-DQMC_MIXED_PRECISION=1')
else:
args.append('-DQMC_MIXED_PRECISION=0')
# New Structure-of-Array (SOA) code, much faster than default
# Array-of-Structure (AOS) code.
# No support for local atomic orbital basis.
if '+soa' in spec:
args.append('-DENABLE_SOA=1')
else:
args.append('-DENABLE_SOA=0')
# Manual Timers
if '+timers' in spec:
args.append('-DENABLE_TIMERS=1')
else:
args.append('-DENABLE_TIMERS=0')
# Proper detection of optimized BLAS and LAPACK.
# Based on the code from the deal II Spack package:
# https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/dealii/package.py
#
# Basically, we override CMake's auto-detection mechanism
# and use the Spack's interface instead.
#
# For version of QMCPACK prior to 3.5.0, the lines
# below are used for detection of all math libraries.
# For QMCPACK 3.5.0 and later, the lines below are only
# needed when MKL is *not* used. Thus, it is redundant
# but there are no negative side effects.
lapack_blas = spec['lapack'].libs + spec['blas'].libs
args.extend([
'-DLAPACK_FOUND=true',
'-DLAPACK_LIBRARIES=%s' % lapack_blas.joined(';')
])
# Next two environment variables were introduced in QMCPACK 3.5.0
# Prior to v3.5.0, these lines should be benign but CMake
# may issue a warning.
if '^mkl' in spec:
args.append('-DENABLE_MKL=1')
args.append('-DMKL_ROOT=%s' % env['MKLROOT'])
else:
args.append('-DENABLE_MKL=0')
# ppconvert is not built by default because it may exhibit numerical
# issues on some systems
if '+ppconvert' in spec:
args.append('-DBUILD_PPCONVERT=1')
else:
args.append('-DBUILD_PPCONVERT=0')
return args
# QMCPACK needs custom install method for a couple of reasons:
# Firstly, we follow the recommendation on the Spack website
# for defining the compilers variables to be the MPI compiler wrappers.
# https://spack.readthedocs.io/en/latest/packaging_guide.html#compiler-wrappers
#
# Note that 3.6.0 release and later has a functioning 'make install',
# but still does not install nexus, manual, etc. So, there is no compelling
# reason to use QMCPACK's built-in version at this time.
def install(self, spec, prefix):
if '+mpi' in spec:
env['CC'] = spec['mpi'].mpicc
env['CXX'] = spec['mpi'].mpicxx
env['F77'] = spec['mpi'].mpif77
env['FC'] = spec['mpi'].mpifc
# create top-level directory
mkdirp(prefix)
# We assume cwd is self.stage.source_path, then
# install manual, labs, and nexus
install_tree('manual', prefix.manual)
install_tree('labs', prefix.labs)
install_tree('nexus', prefix.nexus)
# install binaries
with working_dir(self.build_directory):
install_tree('bin', prefix.bin)
def setup_run_environment(self, env):
"""Set-up runtime environment for QMCPACK.
Set PATH and PYTHONPATH for basic analysis scripts for Nexus."""
env.prepend_path('PATH', self.prefix.nexus.bin)
env.prepend_path('PYTHONPATH', self.prefix.nexus.lib)
@run_after('build')
@on_package_attributes(run_tests=True)
def check_install(self):
"""Run ctest after building binary.
It can take over 24 hours to run all the regression tests, here we
only run the unit tests and deterministic tests. If the unit tests
fail, the QMCPACK installation aborts. If the deterministic tests
fails, QMCPACK will still install and emit a warning message."""
with working_dir(self.build_directory):
ctest('-R', 'unit')
try:
ctest('-R', 'deterministic', '-LE', 'unstable')
except ProcessError:
warn = 'Unit tests passed, but deterministic tests failed.\n'
warn += 'Please report this failure to:\n'
warn += 'https://github.com/QMCPACK/qmcpack/issues'
tty.msg(warn)
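# Added usage sketch (not part of the original package): typical Spack spec
# lines for the variants defined above. The spec syntax is standard Spack;
# the particular version/variant combinations are illustrative only.
#
#   spack install qmcpack+mpi+phdf5             # default real-valued build
#   spack install qmcpack+complex+mixed         # complex, mixed-precision build
#   spack install qmcpack+cuda cuda_arch=70     # single GPU architecture per build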
|
lgpl-2.1
|
stylianos-kampakis/scikit-learn
|
sklearn/linear_model/least_angle.py
|
61
|
54324
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that it not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and
            # the equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
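# Illustrative usage sketch (not part of the library source): the names X and
# y below are hypothetical and only demonstrate the call signature and return
# shapes documented in lars_path above.
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 10)
#     y = X[:, 0] - 2 * X[:, 3] + 0.01 * rng.randn(50)
#     alphas, active, coefs = lars_path(X, y, method='lasso')
#     # coefs has shape (n_features, len(alphas)); column j holds the
#     # coefficients at regularization level alphas[j].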
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self if LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
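# Note (restating the selection rule implemented in LassoLarsIC.fit above, no
# new behaviour): for every alpha on the LARS path the criterion computed is
#     n_samples * log(mean squared residual) + K * df
# with K = 2 for AIC and K = log(n_samples) for BIC, and df the number of
# non-zero coefficients at that alpha; the alpha minimizing the criterion is
# stored in alpha_ and its coefficients in coef_.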
|
bsd-3-clause
|
1ijingwei/1ijingwei.github.io
|
markdown_generator/talks.py
|
199
|
4000
|
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them less readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
|
mit
|
stscieisenhamer/glue
|
glue/core/util.py
|
1
|
11445
|
from __future__ import absolute_import, division, print_function
import logging
from itertools import count
from functools import partial
import numpy as np
import pandas as pd
from matplotlib.ticker import AutoLocator, MaxNLocator, LogLocator
from matplotlib.ticker import (LogFormatterMathtext, ScalarFormatter,
FuncFormatter)
__all__ = ["relim", "split_component_view", "join_component_view",
"facet_subsets", "colorize_subsets", "disambiguate",
"row_lookup", 'small_view', 'small_view_array', 'visible_limits',
'tick_linker', 'update_ticks']
def relim(lo, hi, log=False):
logging.getLogger(__name__).debug("Inputs to relim: %r %r", lo, hi)
x, y = lo, hi
if log:
if lo < 0:
x = 1e-5
if hi < 0:
y = 1e5
return x * .95, y * 1.05
delta = y - x
return (x - .02 * delta, y + .02 * delta)
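# Illustrative sketch of the padding behaviour (hypothetical values):
#     relim(0, 10) -> (-0.2, 10.2)  # linear: 2% of the range added per side
#     relim(2, 8, log=True) -> (1.9, 8.4)  # log: multiplicative 5% padding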
def split_component_view(arg):
"""Split the input to data or subset.__getitem__ into its pieces.
:param arg: The input passed to data or subset.__getitem__.
Assumed to be either a scalar or tuple
:rtype: tuple
The first item is the Component selection (a ComponentID or
string)
The second item is a view (tuple of slices, slice scalar, or view
object)
"""
if isinstance(arg, tuple):
if len(arg) == 1:
raise TypeError("Expected a scalar or >length-1 tuple, "
"got length-1 tuple")
if len(arg) == 2:
return arg[0], arg[1]
return arg[0], arg[1:]
else:
return arg, None
def join_component_view(component, view):
"""Pack a componentID and optional view into single tuple
Returns an object compatible with data.__getitem__ and related
methods. Handles edge cases of when view is None, a scalar, a
tuple, etc.
:param component: ComponentID
:param view: view into data, or None
"""
if view is None:
return component
result = [component]
try:
result.extend(view)
except TypeError: # view is a scalar
result = [component, view]
return tuple(result)
def facet_subsets(data_collection, cid, lo=None, hi=None, steps=5,
prefix='', log=False):
"""Create a series of subsets that partition the values of
a particular attribute into several bins
    This creates `steps` new subset groups, adds them to the data collection,
and returns the list of newly created subset groups.
    :param data_collection: DataCollection object to use
    :type data_collection: :class:`~glue.core.data_collection.DataCollection`
    :param cid: ComponentID to facet on
    :type cid: :class:`~glue.core.component_id.ComponentID`
:param lo: The lower bound for the faceting. Defaults to minimum value
in data
:type lo: float
:param hi: The upper bound for the faceting. Defaults to maximum
value in data
:type hi: float
:param steps: The number of subsets to create. Defaults to 5
:type steps: int
:param prefix: If present, the new subset labels will begin with `prefix`
:type prefix: str
:param log: If True, space divisions logarithmically. Default=False
:type log: bool
:returns: List of :class:`~glue.core.subset_group.SubsetGroup` instances
added to `data`
Example::
        facet_subsets(data, data.id['mass'], lo=0, hi=10, steps=2)
creates 2 new subsets. The first represents the constraint 0 <=
mass < 5. The second represents 5 <= mass <= 10::
        facet_subsets(data, data.id['mass'], lo=10, hi=0, steps=2)
Creates 2 new subsets. The first represents the constraint 10 >= x > 5
The second represents 5 >= mass >= 0::
        facet_subsets(data, data.id['mass'], lo=0, hi=10, steps=2, prefix='m')
Labels the subsets ``m_1`` and ``m_2``.
Note that the last range is inclusive on both sides. For example, if ``lo``
is 0 and ``hi`` is 5, and ``steps`` is 5, then the intervals for the subsets
are [0,1), [1,2), [2,3), [3,4), and [4,5].
"""
from glue.core.exceptions import IncompatibleAttribute
if lo is None or hi is None:
for data in data_collection:
try:
vals = data[cid]
break
except IncompatibleAttribute:
continue
else:
raise ValueError("Cannot infer data limits for ComponentID %s"
% cid)
if lo is None:
lo = np.nanmin(vals)
if hi is None:
hi = np.nanmax(vals)
reverse = lo > hi
if log:
rng = np.logspace(np.log10(lo), np.log10(hi), steps + 1)
else:
rng = np.linspace(lo, hi, steps + 1)
states = []
labels = []
for i in range(steps):
# The if i < steps - 1 clauses are needed because the last interval
# has to be inclusive on both sides.
if reverse:
if i < steps - 1:
states.append((cid <= rng[i]) & (cid > rng[i + 1]))
labels.append(prefix + '{0}<{1}<={2}'.format(rng[i + 1], cid, rng[i]))
else:
states.append((cid <= rng[i]) & (cid >= rng[i + 1]))
labels.append(prefix + '{0}<={1}<={2}'.format(rng[i + 1], cid, rng[i]))
else:
if i < steps - 1:
states.append((cid >= rng[i]) & (cid < rng[i + 1]))
labels.append(prefix + '{0}<={1}<{2}'.format(rng[i], cid, rng[i + 1]))
else:
states.append((cid >= rng[i]) & (cid <= rng[i + 1]))
labels.append(prefix + '{0}<={1}<={2}'.format(rng[i], cid, rng[i + 1]))
result = []
for lbl, s in zip(labels, states):
sg = data_collection.new_subset_group(label=lbl, subset_state=s)
result.append(sg)
return result
def colorize_subsets(subsets, cmap, lo=0, hi=1):
"""Re-color a list of subsets according to a colormap
:param subsets: List of subsets
:param cmap: Matplotlib colormap instance
:param lo: Start location in colormap. 0-1. Defaults to 0
:param hi: End location in colormap. 0-1. Defaults to 1
The colormap will be sampled at `len(subsets)` even intervals
between `lo` and `hi`. The color at the `ith` interval will be
applied to `subsets[i]`
"""
from matplotlib import cm
sm = cm.ScalarMappable(cmap=cmap)
sm.norm.vmin = 0
sm.norm.vmax = 1
vals = np.linspace(lo, hi, len(subsets))
rgbas = sm.to_rgba(vals)
for color, subset in zip(rgbas, subsets):
r, g, b, a = color
r = int(255 * r)
g = int(255 * g)
b = int(255 * b)
subset.style.color = '#%2.2x%2.2x%2.2x' % (r, g, b)
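# Illustrative note on the hex formatting used above (hypothetical values):
# each 0-1 colormap channel is scaled to 0-255 and written as two hex digits,
# e.g. '#%2.2x%2.2x%2.2x' % (255, 128, 0) -> '#ff8000'.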
def disambiguate(label, taken):
"""If necessary, add a suffix to label to avoid name conflicts
:param label: desired label
:param taken: set of taken names
Returns label if it is not in the taken set. Otherwise, returns
label_NN where NN is the lowest integer such that label_NN not in taken.
"""
if label not in taken:
return label
suffix = "_%2.2i"
label = str(label)
for i in count(1):
candidate = label + (suffix % i)
if candidate not in taken:
return candidate
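# Illustrative sketch (hypothetical values): the first free suffixed name is
# returned when the label is already taken.
#     disambiguate('data', {'data'}) -> 'data_01'
#     disambiguate('data', {'data', 'data_01'}) -> 'data_02'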
def row_lookup(data, categories):
"""
Lookup which row in categories each data item is equal to
:param data: array-like
:param categories: array-like of unique values
:returns: Float array.
              If result[i] is finite, then data[i] = categories[result[i]]
Otherwise, data[i] is not in the categories list
"""
# np.searchsorted doesn't work on mixed types in Python3
ndata, ncat = len(data), len(categories)
data = pd.DataFrame({'data': data, 'row': np.arange(ndata)})
cats = pd.DataFrame({'categories': categories,
'cat_row': np.arange(ncat)})
m = pd.merge(data, cats, left_on='data', right_on='categories')
result = np.zeros(ndata, dtype=float) * np.nan
result[np.array(m.row)] = m.cat_row
return result
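# Illustrative sketch (hypothetical values): entries found in `categories` map
# to the matching row index, everything else becomes NaN.
#     row_lookup(['a', 'c', 'x'], ['a', 'b', 'c']) -> array([0., 2., nan])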
def small_view(data, attribute):
"""
Extract a downsampled view from a dataset, for quick
statistical summaries
"""
shp = data.shape
view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
return data[attribute, view]
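# Illustrative note (hypothetical shape): for a (1000, 1000) dataset the view
# built above is (slice(None, None, 20), slice(None, None, 20)), i.e. roughly
# a 50x50 subsample used for quick statistics.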
def small_view_array(data):
"""
Same as small_view, except using a numpy array as input
"""
shp = data.shape
view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
return np.asarray(data)[view]
def visible_limits(artists, axis):
"""
Determines the data limits for the data in a set of artists.
Ignores non-visible artists
    Assumes each artist has a get_data method which returns a tuple of x,y
Returns a tuple of min, max for the requested axis, or None if no data
present
:param artists: An iterable collection of artists
:param axis: Which axis to compute. 0=xaxis, 1=yaxis
"""
data = []
for art in artists:
if not art.visible:
continue
xy = art.get_data()
assert isinstance(xy, tuple)
val = xy[axis]
if val.size > 0:
data.append(xy[axis])
if len(data) == 0:
return
data = np.hstack(data)
if data.size == 0:
return
data = data[np.isfinite(data)]
if data.size == 0:
return
lo, hi = np.nanmin(data), np.nanmax(data)
if not np.isfinite(lo):
return
return lo, hi
def tick_linker(all_categories, pos, *args):
# We need to take care to ignore negative indices since these would actually
    # 'work' when accessing all_categories, but we need to avoid that.
if pos < 0 or pos >= len(all_categories):
return ''
else:
try:
pos = np.round(pos)
return all_categories[int(pos)]
except IndexError:
return ''
def update_ticks(axes, coord, components, is_log):
"""
Changes the axes to have the proper tick formatting based on the type of
component.
:param axes: A matplotlib axis object to alter
:param coord: 'x' or 'y'
:param components: A list() of components that are plotted along this axis
:param is_log: Boolean for log-scale.
:kwarg max_categories: The maximum number of categories to display.
:return: None or #categories if components is Categorical
"""
if coord == 'x':
axis = axes.xaxis
elif coord == 'y':
axis = axes.yaxis
else:
raise TypeError("coord must be one of x,y")
is_cat = all(comp.categorical for comp in components)
if is_log:
axis.set_major_locator(LogLocator())
axis.set_major_formatter(LogFormatterMathtext())
elif is_cat:
all_categories = np.empty((0,), dtype=np.object)
for comp in components:
all_categories = np.union1d(comp.categories, all_categories)
locator = MaxNLocator(10, integer=True)
locator.view_limits(0, all_categories.shape[0])
format_func = partial(tick_linker, all_categories)
formatter = FuncFormatter(format_func)
axis.set_major_locator(locator)
axis.set_major_formatter(formatter)
return all_categories.shape[0]
else:
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
|
bsd-3-clause
|
dmitriz/zipline
|
tests/risk/answer_key.py
|
39
|
11989
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
    # Converts a spreadsheet column letter (e.g. 'A', 'D', 'AA') to its
    # 1-based index via a base-26 expansion.
index = 0
for i, char in enumerate(reversed(col_letter)):
index += ((ord(char) - 65) + 1) * pow(26, i)
return index
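# Illustrative sketch of the 1-based conversion (hypothetical values):
#     col_letter_to_index('A') -> 1
#     col_letter_to_index('D') -> 4
#     col_letter_to_index('AA') -> 27
# DataIndex.col_index below subtracts 1 to obtain the 0-based index expected
# by the python-excel libraries.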
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
with open(ANSWER_KEY_PATH, 'rb') as f:
md5 = hashlib.md5()
buf = f.read(1024)
md5.update(buf)
while buf != b"":
buf = f.read(1024)
md5.update(buf)
return md5.hexdigest()
def ensure_latest_answer_key():
"""
    Get the latest answer key from a publicly available location.
    Logic for determining what and when to download is as follows:
    - If there is no local spreadsheet file, then get the latest answer key,
as defined by the last row in the checksum file.
- If there is a local spreadsheet file:
-- If the spreadsheet's checksum is in the checksum file:
    --- If the spreadsheet's checksum does not match the latest, then download
    the spreadsheet for the latest checksum and replace the local copy.
--- If the spreadsheet's checksum matches the latest, then skip download,
and use the local spreadsheet as a cached copy.
-- If the spreadsheet's checksum is not in the checksum file, then leave
the local file alone, assuming that the local xls's md5 is not in the list
due to local modifications during development.
    It is possible that md5's could collide; if that is ever the case, we
    should then find an alternative naming scheme.
The spreadsheet answer sheet is not kept in SCM, as every edit would
increase the repo size by the file size, since it is treated as a binary.
"""
answer_key_dl_checksum = None
local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
if local_answer_key_exists:
local_hash = answer_key_signature()
if local_hash in ANSWER_KEY_CHECKSUMS:
# Assume previously downloaded version.
# Check for latest.
if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
# More recent checksum, download
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
else:
# Assume local copy that is being developed on
answer_key_dl_checksum = None
else:
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
if answer_key_dl_checksum:
res = requests.get(
ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
with open(ANSWER_KEY_PATH, 'wb') as f:
f.write(res.content)
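# Condensed sketch (not part of the original module) of the decision logic
# described in the docstring above, expressed as a pure function over the local
# file's hash. It returns the checksum to download, or None to keep the local copy.
def _needs_download(local_hash, checksums=ANSWER_KEY_CHECKSUMS):
    if local_hash is None:                # no local spreadsheet at all
        return checksums[-1]
    if local_hash in checksums:           # known, previously downloaded copy
        return checksums[-1] if local_hash != checksums[-1] else None
    return None                           # locally modified copy: leave it alone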
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
"""
Coordinates for the spreadsheet, using the values as seen in the notebook.
    The python-excel libraries use 0-based indexing, while the spreadsheet in a
    GUI uses 1-based indexing.
"""
def __init__(self, sheet_name, col, row_start, row_end,
value_type='float'):
self.sheet_name = sheet_name
self.col = col
self.row_start = row_start
self.row_end = row_end
self.value_type = value_type
@property
def col_index(self):
return col_letter_to_index(self.col) - 1
@property
def row_start_index(self):
return self.row_start - 1
@property
def row_end_index(self):
return self.row_end - 1
def __str__(self):
return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format(
sheet_name=self.sheet_name,
col=self.col,
row_start=self.row_start,
row_end=self.row_end
)
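# Illustrative sketch (not part of the original module): how a DataIndex maps a
# 1-based spreadsheet range onto the 0-based indices used by xlrd. The literal
# values below are hypothetical but consistent with the properties above.
def _data_index_example():
    idx = DataIndex('Sim Period', 'D', 4, 255)
    assert idx.col_index == 3           # column 'D' -> 4, minus 1 for 0-based
    assert idx.row_start_index == 3     # row 4 -> index 3
    assert str(idx) == "'Sim Period'!D4:D255"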
class AnswerKey(object):
INDEXES = {
'RETURNS': DataIndex('Sim Period', 'D', 4, 255),
'BENCHMARK': {
'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
'Returns': DataIndex('s_p', 'H', 4, 254)
},
# Below matches the inconsistent capitalization in spreadsheet
'BENCHMARK_PERIOD_RETURNS': {
'Monthly': DataIndex('s_p', 'R', 8, 19),
'3-Month': DataIndex('s_p', 'S', 10, 19),
'6-month': DataIndex('s_p', 'T', 13, 19),
'year': DataIndex('s_p', 'U', 19, 19),
},
'BENCHMARK_PERIOD_VOLATILITY': {
'Monthly': DataIndex('s_p', 'V', 8, 19),
'3-Month': DataIndex('s_p', 'W', 10, 19),
'6-month': DataIndex('s_p', 'X', 13, 19),
'year': DataIndex('s_p', 'Y', 19, 19),
},
'ALGORITHM_PERIOD_RETURNS': {
'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
'3-Month': DataIndex('Sim Period', 'AA', 25, 34),
'6-month': DataIndex('Sim Period', 'AB', 28, 34),
'year': DataIndex('Sim Period', 'AC', 34, 34),
},
'ALGORITHM_PERIOD_VOLATILITY': {
'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
'3-Month': DataIndex('Sim Period', 'AI', 25, 34),
'6-month': DataIndex('Sim Period', 'AJ', 28, 34),
'year': DataIndex('Sim Period', 'AK', 34, 34),
},
'ALGORITHM_PERIOD_SHARPE': {
'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
'3-Month': DataIndex('Sim Period', 'AM', 25, 34),
'6-month': DataIndex('Sim Period', 'AN', 28, 34),
'year': DataIndex('Sim Period', 'AO', 34, 34),
},
'ALGORITHM_PERIOD_BETA': {
'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
'3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
'6-month': DataIndex('Sim Period', 'AR', 28, 34),
'year': DataIndex('Sim Period', 'AS', 34, 34),
},
'ALGORITHM_PERIOD_ALPHA': {
'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
'3-Month': DataIndex('Sim Period', 'AU', 25, 34),
'6-month': DataIndex('Sim Period', 'AV', 28, 34),
'year': DataIndex('Sim Period', 'AW', 34, 34),
},
'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
'3-Month': DataIndex('Sim Period', 'BK', 25, 34),
'6-month': DataIndex('Sim Period', 'BL', 28, 34),
'year': DataIndex('Sim Period', 'BM', 34, 34),
},
'ALGORITHM_PERIOD_COVARIANCE': {
'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
'3-Month': DataIndex('Sim Period', 'BG', 25, 34),
'6-month': DataIndex('Sim Period', 'BH', 28, 34),
'year': DataIndex('Sim Period', 'BI', 34, 34),
},
'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
'3-Month': DataIndex('Sim Period', 'BO', 25, 34),
'6-month': DataIndex('Sim Period', 'BP', 28, 34),
'year': DataIndex('Sim Period', 'BQ', 34, 34),
},
'ALGORITHM_PERIOD_SORTINO': {
'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
'3-Month': DataIndex('Sim Period', 'BS', 25, 34),
'6-month': DataIndex('Sim Period', 'BT', 28, 34),
'year': DataIndex('Sim Period', 'BU', 34, 34),
},
'ALGORITHM_RETURN_VALUES': DataIndex(
'Sim Cumulative', 'D', 4, 254),
'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
'Sim Cumulative', 'P', 4, 254),
'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
'Sim Cumulative', 'R', 4, 254),
'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
'Sim Cumulative', 'U', 4, 254),
'CUMULATIVE_SORTINO': DataIndex(
'Sim Cumulative', 'V', 4, 254),
'CUMULATIVE_INFORMATION': DataIndex(
'Sim Cumulative', 'AA', 4, 254),
'CUMULATIVE_BETA': DataIndex(
'Sim Cumulative', 'AD', 4, 254),
'CUMULATIVE_ALPHA': DataIndex(
'Sim Cumulative', 'AE', 4, 254),
'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
'Sim Cumulative', 'AH', 4, 254),
}
def __init__(self):
self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)
self.sheets = {}
self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
'Sim Cumulative')
self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')
for name, index in self.INDEXES.items():
if isinstance(index, dict):
subvalues = {}
for subkey, subindex in index.items():
subvalues[subkey] = self.get_values(subindex)
setattr(self, name, subvalues)
else:
setattr(self, name, self.get_values(index))
def parse_date_value(self, value):
return xlrd.xldate_as_tuple(value, 0)
def parse_float_value(self, value):
return value if value != '' else np.nan
def get_raw_values(self, data_index):
return self.sheets[data_index.sheet_name].col_values(
data_index.col_index,
data_index.row_start_index,
data_index.row_end_index + 1)
@property
def value_type_to_value_func(self):
return {
'float': self.parse_float_value,
'date': self.parse_date_value,
}
def get_values(self, data_index):
value_parser = self.value_type_to_value_func[data_index.value_type]
return [value for value in
map(value_parser, self.get_raw_values(data_index))]
ANSWER_KEY = AnswerKey()
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
'Algorithm Returns': ALGORITHM_RETURNS})
RISK_CUMULATIVE = pd.DataFrame({
'volatility': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
'sharpe': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
'downside_risk': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
'sortino': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
'information': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
'alpha': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
'beta': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_BETA))),
'max_drawdown': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
|
apache-2.0
|
jmanday/Master
|
TFM/scripts/matching-BruteForce-LipVireo.py
|
1
|
5171
|
# -*- coding: utf-8 -*-
#########################################################################
### Jesus Garcia Manday
### matching-BruteForce-LipVireo.py
### @Description: script to compute the matching between two sets of
###               descriptors from two images using a brute-force algorithm,
###               in which each descriptor in the first set is compared one by
###               one against the second set to find its closest descriptor.
###               The SIFT descriptor is used. In this variant the
###               Harris-Laplace, Hessian-Laplace and Fast-Hessian
###               interest-point detectors from the Lip-Vireo library are used.
#########################################################################
import os
import sys
import numpy as np
import cv2
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import csv
PATH_OUTPUTS_DES_IMAGES = "/Users/jesusgarciamanday/Documents/Master/TFM/outputs/lip-vireo-des-time/"
class Descriptor:
def __init__(self, numkp, m, d):
self.numkp = numkp
self.m = m
self.d = d
def getNameFile(file):
fileName = ""
if (len(file.split("R")) > 1):
fileName = file.split("R")[0]
else:
if (len(file.split("L")) > 1):
fileName = file.split("L")[0]
return fileName
def generateDescriptorFromFile(nameFile):
nLine = 0
features = []
descriptors = []
with open(nameFile, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
if (nLine == 0):
res = row[0].split(" ")
des = Descriptor(int(res[0]), int(res[1]), int(res[2]))
else:
if (nLine >= 2):
feature = row[0].split(" ")
if (len(features) < des.d):
for f in feature:
if (f != ''):
features.append((float(f)))
else:
descriptors.append(features)
features = []
nLine += 1
descriptors.append(features)
return descriptors
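# Note added for clarity (not in the original script): the file layout assumed
# by generateDescriptorFromFile, inferred from the parsing logic above, is a
# first line "<num_keypoints> <m> <d>", a skipped header line, and then
# whitespace-separated values grouped into vectors of d floats per keypoint.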
def matchingBruteForce(desFilesTrainImages, desFilesQueryImages, descriptor, detector):
print(descriptor, "---", detector)
valuesDataMatching = []
results = []
desFilesQueryImages.sort()
desFilesTrainImages.sort()
for desImgQuery in desFilesQueryImages:
nMatch = 0
index = 0
firstImage = ""
nameImgQuery = getNameFile(desImgQuery)
des1 = generateDescriptorFromFile(PATH_OUTPUTS_DES_IMAGES + descriptor + "/" + detector + "/query-images/" + desImgQuery)
for desImgTrain in desFilesTrainImages:
nameImgTrain = getNameFile(desImgTrain)
des2 = generateDescriptorFromFile(PATH_OUTPUTS_DES_IMAGES + descriptor + "/" + detector + "/train-images/" + desImgTrain)
# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(np.asarray(des1,np.float32), np.asarray(des2,np.float32), k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < 0.75*n.distance:
good.append([m])
if ((nameImgTrain == firstImage) or (firstImage == "")):
nMatch = nMatch + len(good)
else:
valuesDataMatching.append({"imageQuery": nameImgQuery, "imageTrain": firstImage, "value": nMatch})
nMatch = len(good)
firstImage = nameImgTrain
firstImage = ""
nMatch = 0
valM = max(valuesDataMatching, key=lambda item:item['value'])
print(valM)
results.append(valM)
valuesDataMatching = []
nameCSV = "results-BruteForce-" + descriptor + "-" + detector + ".csv"
with open(nameCSV, 'w') as csvfile:
filewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
filewriter.writerow(['Image Query', 'Image Train', "Value matching"])
for rs in results:
filewriter.writerow([rs['imageQuery'], rs['imageTrain'], rs['value']])
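# Minimal sketch (not part of the original script) of the Lowe ratio test used
# inside matchingBruteForce above, isolated as a standalone helper. `des1` and
# `des2` are assumed to be lists of descriptor vectors as returned by
# generateDescriptorFromFile; the 0.75 threshold mirrors the value used above.
def ratioTestMatches(des1, des2, ratio=0.75):
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(np.asarray(des1, np.float32),
                          np.asarray(des2, np.float32), k=2)
    good = []
    for m, n in matches:
        # keep a match only when the best distance is clearly smaller than
        # the second-best distance
        if m.distance < ratio * n.distance:
            good.append(m)
    return good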
def getFilesFromDescriptor(descriptor):
desFilesImages = []
path = PATH_OUTPUTS_DES_IMAGES + descriptor
direc = os.listdir(path)
direc.sort()
nPath = ""
for d in direc:
nPath = path + "/" + d
dirs = os.listdir(nPath)
print(nPath)
for d2 in dirs:
files = os.listdir(nPath + "/" + d2)
desFilesImages.append(files)
matchingBruteForce(desFilesImages[0], desFilesImages[1], descriptor, d)
desFilesImages.clear()
if __name__ == "__main__":
nameDesc = sys.argv[1]
getFilesFromDescriptor(nameDesc)
|
apache-2.0
|
fejoa/IVANWorldmapResearch
|
PoissonDiskGeneratorIntegers.py
|
1
|
3548
|
# -*- coding: utf-8 -*-
"""
IVAN Worldmap Research
Copyright (C) Ryan van Herel
Released under the GNU General
Public License
See LICENSING which should be included
along with this file for more details
@author: fejoa
code adapted from an implementation of poisson disc sampling by Connor Johnson
http://connor-johnson.com/2015/04/08/poisson-disk-sampling/
"""
import numpy as np
from random import random
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
"""
class pds:
def __init__( self, w, h, r, n ):
# w and h are the width and height of the field
self.w = w
self.h = h
# n is the number of test points
self.n = n
self.r2 = r**2
self.A = 3*self.r2
# cs is the cell size
self.cs = r / np.sqrt(2)
# gw and gh are the number of grid cells
self.gw = int( np.ceil( self.w/self.cs ) )
self.gh = int( np.ceil( self.h/self.cs ) )
# create a grid and a queue
self.grid = [ None ] * self.gw * self.gh
self.queue = list()
# set the queue size and sample size to zero
self.qs, self.ss = 0, 0
def distance( self, x, y ):
# find where (x,y) sits in the grid
x_idx = int( x/self.cs )
y_idx = int( y/self.cs )
# determine a neighborhood of cells around (x,y)
x0 = max( x_idx-2, 0 )
y0 = max( y_idx-2, 0 )
        x1 = min( x_idx+3, self.gw )
        y1 = min( y_idx+3, self.gh )
# search around (x,y)
for y_idx in range( y0, y1 ):
for x_idx in range( x0, x1 ):
step = y_idx*self.gw + x_idx
# if the sample point exists on the grid
if self.grid[ step ]:
s = self.grid[ step ]
dx = ( s[0] - x )**2
dy = ( s[1] - y )**2
# and it is too close
if dx + dy < self.r2:
# then barf
return False
return True
def set_point( self, x, y ):
s = [ x, y ]
self.queue.append( s )
# find where (x,y) sits in the grid
x_idx = int( x/self.cs )
y_idx = int( y/self.cs )
step = self.gw*y_idx + x_idx
self.grid[ step ] = s
self.qs += 1
self.ss += 1
return s
def rvs( self ):
if self.ss == 0:
x = random() * self.w
y = random() * self.h
self.set_point( x, y )
while self.qs:
x_idx = int( random() * self.qs )
s = self.queue[ x_idx ]
for y_idx in range( self.n ):
a = 2 * np.pi * random()
b = np.sqrt( self.A * random() + self.r2 )
x = s[0] + b*np.cos( a )
y = s[1] + b*np.sin( a )
if( x >= 0 )and( x < self.w ):
if( y >= 0 )and( y < self.h ):
if( self.distance( x, y ) ):
self.set_point( x, y )
del self.queue[x_idx]
self.qs -= 1
sample = list( filter( None, self.grid ) )
sample = np.asfarray( sample )
return sample
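# Note added for clarity (not in the original source): in rvs() each candidate
# radius is b = sqrt(A*u + r^2) with A = 3*r^2 and u uniform in [0, 1), so b^2
# is uniform on [r^2, 4*r^2] and every candidate lands in the annulus between
# r and 2r around the active sample, the standard Bridson-style choice.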
"""
obj = pds( 128, 128, 10, 10 )
sample1 = obj.rvs()
#sample2 = obj.rvs()
x1 = [int(p[0]) for p in sample1]
y1 = [int(q[1]) for q in sample1]
print "number of points in range is %d", len(x1)
print "x1 is", x1
plt.scatter(x1, y1, color='red')
plt.show()
#print "sample1[0,:] is %d", [p[0] for p in sample1]
"""
|
gpl-2.0
|
ingelectronicadj/FisicaConPython
|
FisicaCuantica/Concentracion de portadores intrinsecos/concentracionDePortadoresIntrinsecos.py
|
1
|
6888
|
# Author: Diego Javier Mena
# Libraries required to run the code
import numpy as num
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plot
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
import scipy.constants as sc
import matplotlib.patches as mpatches
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
home = Tk.Tk()
home.wm_title("Concentracion de portadores intrinsecos")
# Declaration of plot limits
f = Figure()
plota = f.add_subplot(111)
# Declaration of variables
a = num.linspace(200,700,5000) # Temperature from 200 to 700 kelvin
# Definition of the functions
mnSi=1.08*sc.m_e
mnGaAs=0.068*sc.m_e
mnGe=0.55*sc.m_e
mnGaN=0.19*sc.m_e
mnGaP=0.9*sc.m_e
mnInP=0.07*sc.m_e
mpSi=1.1*sc.m_e
mpGaAs=0.5*sc.m_e
mpGe=0.39*sc.m_e
mpGaN=0.8*sc.m_e
mpGaP=0.9*sc.m_e
mpInP=0.4*sc.m_e
#GaN
nc=2*((2*num.pi*mnGaN*sc.k*a)/(sc.h**2))**(3/2)/1e+6 # divided by 1e+6 so the value is in cm^-3 (it is computed in m^-3)
nv=2*((2*num.pi*mpGaN*sc.k*a)/(sc.h**2))**(3/2)/1e+6
Eg=3.470-((7.70*10**(-4))*a**2/(a+600)) #GaN
b=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(a)*sc.k))
#GaP
nc=2*((2*num.pi*mnGaP*sc.k*a)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpGaP*sc.k*a)/(sc.h**2))**(3/2)/1e+6
Eg=2.340-((6.20*10**(-4))*a**2/(a+460)) #GaP
c=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(a)*sc.k))
#GaAs
nc=2*((2*num.pi*mnGaAs*sc.k*a)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpGaAs*sc.k*a)/(sc.h**2))**(3/2)/1e+6
Eg=1.519-((5.41*10**(-4))*a**2/(a+204)) #GaAS
d=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(a)*sc.k))
#InP
nc=2*((2*num.pi*mnInP*sc.k*a)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpInP*sc.k*a)/(sc.h**2))**(3/2)/1e+6
Eg=1.425-((4.50*10**(-4))*a**2/(a+327)) #InP
e=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(a)*sc.k))
#Si
nc=2*((2*num.pi*mnSi*sc.k*a)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpSi*sc.k*a)/(sc.h**2))**(3/2)/1e+6
Eg=1.170-((4.73*10**(-4))*a**2/(a+636)) #Si
g=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(a)*sc.k))
#Ge
nc=2*((2*num.pi*mnGe*sc.k*a)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpGe*sc.k*a)/(sc.h**2))**(3/2)/1e+6
Eg=0.744-((4.77*10**(-4))*a**2/(a+235)) #Ge
h=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(a)*sc.k))
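# Added note (not in the original script): every block above evaluates the same
# intrinsic-carrier relation, ni = sqrt(Nc*Nv) * exp(-Eg*e / (2*k*T)), with
# Nc,v = 2*(2*pi*m*k*T / h^2)**1.5 / 1e+6 converted to cm^-3. A hedged helper
# equivalent to those blocks (not called anywhere below) could look like this:
def _ni_intrinsic(mn, mp, Eg, T):
    # effective densities of states in the conduction/valence bands, in cm^-3
    nc = 2 * ((2 * num.pi * mn * sc.k * T) / (sc.h ** 2)) ** 1.5 / 1e+6
    nv = 2 * ((2 * num.pi * mp * sc.k * T) / (sc.h ** 2)) ** 1.5 / 1e+6
    return num.sqrt(nc * nv) * num.exp(-Eg * sc.e / (2 * T * sc.k))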
# Assign the corresponding label to each curve and axis
plota.semilogy(a,b,color='y',label='GaN')
plota.semilogy(a,c,color='b',label='GaP')
plota.semilogy(a,d,color='g',label='GaAS')
plota.semilogy(a,e,color='black',label='InP')
plota.semilogy(a,g,color='r',label='Si')
plota.semilogy(a,h,color='cyan',label='Ge')
plota.set_xlabel("$T \ (K)$")
plota.set_ylabel("$ni \ (cm-3)$",rotation='vertical')
plota.axis([200,700,0,10**17])
plota.legend(loc=4)
# Display the plot -------------------------------------------------
plota.set_title('Concentracion de portadores intrinsecos')
f.tight_layout()
plota.grid(True)
canvas = FigureCanvasTkAgg(f,master=home)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg(canvas, home)
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
# Keyboard selection process
R = "right"
L = "left"
U = "up"
D = "down"
x = 300
state=0
var = Tk.StringVar()
label = Tk.Label(home, textvariable=var, fg="black", bg="white", font = "Helvetica 14 bold italic")
var.set("Utilice las flechas del teclado para desplazarce por la grafica")
def graficar(x):
if state==0:
#Ge
nc=2*((2*num.pi*mnGe*sc.k*x)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpGe*sc.k*x)/(sc.h**2))**(3/2)/1e+6
Eg=0.744-((4.77*10**(-4))*x**2/(x+235)) #Ge
y=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(x)*sc.k))
elif state==1:
#Si
nc=2*((2*num.pi*mnSi*sc.k*x)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpSi*sc.k*x)/(sc.h**2))**(3/2)/1e+6
Eg=1.170-((4.73*10**(-4))*x**2/(x+636)) #Si
y=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(x)*sc.k))
elif state==2:
#InP
nc=2*((2*num.pi*mnInP*sc.k*x)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpInP*sc.k*x)/(sc.h**2))**(3/2)/1e+6
Eg=1.425-((4.50*10**(-4))*x**2/(x+327)) #InP
y=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(x)*sc.k))
elif state==3:
#GaAs
nc=2*((2*num.pi*mnGaAs*sc.k*x)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpGaAs*sc.k*x)/(sc.h**2))**(3/2)/1e+6
Eg=1.519-((5.41*10**(-4))*x**2/(x+204)) #GaAS
y=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(x)*sc.k))
elif state==4:
#GaP
nc=2*((2*num.pi*mnGaP*sc.k*x)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpGaP*sc.k*x)/(sc.h**2))**(3/2)/1e+6
Eg=2.340-((6.20*10**(-4))*x**2/(x+460)) #GaP
y=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(x)*sc.k))
else:
#GaN
nc=2*((2*num.pi*mnGaN*sc.k*x)/(sc.h**2))**(3/2)/1e+6
nv=2*((2*num.pi*mpGaN*sc.k*x)/(sc.h**2))**(3/2)/1e+6
Eg=3.470-((7.70*10**(-4))*x**2/(x+600)) #GaN
y=num.sqrt(nc*nv)*num.exp(-Eg*sc.e/(2*(x)*sc.k))
n = num.linspace(x,x,2)
plota.cla()
plota.semilogy(a,b,color='y',label='GaN')
plota.semilogy(a,c,color='b',label='GaP')
plota.semilogy(a,d,color='g',label='GaAS')
plota.semilogy(a,e,color='black',label='InP')
plota.semilogy(a,g,color='r',label='Si')
plota.semilogy(a,h,color='cyan',label='Ge')
plota.set_xlabel("$T \ (K)$")
plota.set_ylabel("$ni \ (cm-3)$",rotation='vertical')
plota.legend(loc=4)
plota.axis([200,700,0,10**17])
    # Display the plot -------------------------------------------------
plota.set_title('Concentracion de portadores intrinsecos')
f.tight_layout()
plota.grid(True)
plota.scatter([x, ], [y, ], 50, color='red')
canvas.draw()
var.set("Valores sobre la curva\t ni= "+str(y)+" con T= "+str(x))
def on_key_event(event):
    # Down arrow
if event.key==D:
global state
global a
global b
global x
x = x
if state==5:
state=0
else:
state=state+1
graficar(x)
    # Up arrow
elif event.key==U:
global state
global a
global b
global x
x = x
if state==0:
state=5
else:
state=state-1
graficar(x)
    # Right arrow
elif event.key==R:
global a
global b
global x
if x>=700:
x=700
else:
x = x + 10
graficar(x)
    # Left arrow
elif event.key==L:
global a
global b
global x
if x<=200:
x=200
else:
x = x - 10
graficar(x)
else:
pass
key_press_handler(event, canvas, toolbar)
plot.text(4*10**(-6),1*10**(6) , r'$\ Area $')
canvas.mpl_connect('key_press_event', on_key_event)
label.pack(side="bottom", ipady=20)
# Continue the process (enter the Tk main loop)
Tk.mainloop()
|
gpl-3.0
|
mariusvniekerk/impyla
|
impala/bdf.py
|
2
|
16457
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import datetime
from copy import copy
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import csv
import six
from six.moves import map
from six.moves import zip
import pandas as pd
from impala.context import ImpalaContext
from impala.util import (as_pandas, _random_id, _py_to_sql_string,
_get_table_schema_hack, warn_deprecate_ibis)
from impala._sql_model import (_to_TableName, BaseTableRef, JoinTableRef,
SelectItem, SelectStmt, UnionStmt, Literal,
InlineView, TableName, Expr, _create_table,
_create_table_as_select, LimitElement)
warn_deprecate_ibis('BigDataFrame')
# utilities
def _numpy_dtype_to_impala_PrimitiveType(ty):
"""Convert numpy dtype to Impala type string.
Used in converting pandas DataFrame to SQL/Impala
"""
# based on impl in pandas.io.sql.PandasSQLTable._sqlalchemy_type()
if ty is datetime.date:
# TODO: this might be wrong
return 'TIMESTAMP'
if pd.core.common.is_datetime64_dtype(ty):
# TODO: this might be wrong
return 'TIMESTAMP'
if pd.core.common.is_timedelta64_dtype(ty):
return 'BIGINT'
if pd.core.common.is_float_dtype(ty):
return 'DOUBLE'
if pd.core.common.is_integer_dtype(ty):
# TODO: BIGINT may be excessive?
return 'BIGINT'
if pd.core.common.is_bool(ty):
return 'BOOLEAN'
return 'STRING'
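# Illustrative sketch (not part of the original module): deriving an Impala
# schema from a pandas DataFrame with the mapping above, mirroring what
# from_pandas() does further down. The frame passed in is hypothetical.
def _example_schema(df):
    # e.g. float64 -> 'DOUBLE', int64 -> 'BIGINT', object -> 'STRING'
    return [(col, _numpy_dtype_to_impala_PrimitiveType(ty))
            for col, ty in zip(df.columns, df.dtypes)]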
# BigDataFrame creation
def from_sql_query(ic, query, alias=None):
"""Create a BDF from a SQL query executed by Impala"""
query_alias = alias if alias else _random_id('inline_', 4)
table_ref = InlineView(query, query_alias)
schema = _get_table_schema_hack(ic._cursor, table_ref.to_sql())
select_list = tuple(
[SelectItem(expr=Literal(col)) for (col, ty) in schema])
return BigDataFrame(ic, SelectStmt(select_list, table_ref))
def from_sql_table(ic, table):
"""Create a BDF from a table name usable in Impala"""
table_name = _to_TableName(table)
table_ref = BaseTableRef(table_name)
schema = _get_table_schema_hack(ic._cursor, table_ref.to_sql())
select_list = tuple(
[SelectItem(expr=Literal(col)) for (col, ty) in schema])
return BigDataFrame(ic, SelectStmt(select_list, table_ref))
def from_hdfs(ic, path, schema, table=None, overwrite=False,
file_format='TEXTFILE', partition_schema=None,
field_terminator='\t', line_terminator='\n', escape_char='\\'):
"""Create a BDF backed by an external file in HDFS.
    The file must be in an Impala-compatible format
"""
if partition_schema is not None:
raise NotImplementedError(
"Partitions not yet implemented in .from_hdfs()")
if table is None:
temp_table = _random_id('tmp_table_', 8)
table = "%s.%s" % (ic._temp_db, temp_table)
table_name = _to_TableName(table)
if overwrite:
ic._cursor.execute("DROP TABLE IF EXISTS %s" % table_name.to_sql())
create_stmt = _create_table(table_name, schema, path=path,
file_format=file_format,
field_terminator=field_terminator,
line_terminator=line_terminator,
escape_char=escape_char)
ic._cursor.execute(create_stmt)
return from_sql_table(ic, table_name.to_sql())
def from_pandas(ic, df, table=None, path=None, method='in_query',
file_format='TEXTFILE', field_terminator='\t',
line_terminator='\n', escape_char='\\', overwrite=False):
"""Create a BDF by shipping an in-memory pandas `DataFrame` into Impala
    `path` is the target directory for the table data, not a filename
"""
# TODO: this is not atomic
assert isinstance(ic, ImpalaContext)
temp_table = _random_id('tmp_table_', 8)
if table is None:
table = "%s.%s" % (ic._temp_db, temp_table)
if path is None:
path = os.path.join(ic._temp_dir, temp_table)
table_name = _to_TableName(table)
if overwrite:
ic._cursor.execute("DROP TABLE IF EXISTS %s" % table_name.to_sql())
columns = list(df.columns)
types = [_numpy_dtype_to_impala_PrimitiveType(ty) for ty in df.dtypes]
schema = list(zip(columns, types))
create_stmt = _create_table(table_name, schema, path=path,
file_format=file_format,
field_terminator=field_terminator,
line_terminator=line_terminator,
escape_char=escape_char)
ic._cursor.execute(create_stmt)
if method == 'in_query':
query = "INSERT INTO %s VALUES " % table_name.to_sql()
query += ', '.join(['(%s)' % ', '.join(map(_py_to_sql_string, row))
for row in df.values])
ic._cursor.execute(query)
elif method == 'webhdfs':
if file_format != 'TEXTFILE':
raise ValueError("only TEXTFILE format supported for webhdfs")
if path is None:
raise ValueError(
"must supply a path for EXTERNAL table for webhdfs")
hdfs_client = ic.hdfs_client()
raw_data = StringIO()
df.to_csv(raw_data, sep=field_terminator,
line_terminator=line_terminator, quoting=csv.QUOTE_NONE,
escapechar=escape_char, header=False, index=False)
hdfs_client.write(
os.path.join(path, 'data.txt'), raw_data.getvalue(),
overwrite=overwrite)
raw_data.close()
else:
raise ValueError(
"method must be 'in_query' or 'webhdfs'; got %s" % method)
return from_sql_table(ic, table_name.to_sql())
class BigDataFrame(object):
def __init__(self, ic, ast):
self._ic = ic
self._query_ast = ast
self._schema = None
@property
def schema(self):
if self._schema is None:
table_ref = InlineView(
self._query_ast.to_sql(), _random_id('inline_', 4))
self._schema = _get_table_schema_hack(
self._ic._cursor, table_ref.to_sql())
return self._schema
@property
def is_sorted(self):
if isinstance(self._query_ast, SelectStmt):
return self._query_ast._order_by is not None
# TODO: add warning that we're not sure if the BDF is already sorted
# (e.g., bc this BDF is built directly from an inline view of a user-
# supplied query string)
return False
def __getitem__(self, obj):
"""'Indexing' functionality for the BigDataFrame
Given a single object or list, the BDF will interpret it as a
relational projection (i.e., a selection of columns).
Given a tuple of length 2, the first element will be interpreted for
row selection (i.e., predicate/filter/WHERE clause), while the second
element will be interpreted as a projection.
"""
# other select/filter fns should be implemented with this one
if isinstance(obj, tuple) and len(obj) == 2:
alias = _random_id('inline_', 4)
table_ref = InlineView(self._query_ast.to_sql(), alias)
(limit_elt, where) = self._query_ast._filter(obj[0])
select_list = self._query_ast._projection(obj[1])
return BigDataFrame(
self._ic, SelectStmt(
select_list, table_ref, where=where, limit=limit_elt))
elif isinstance(obj, list):
alias = _random_id('inline_', 4)
table_ref = InlineView(self._query_ast.to_sql(), alias)
select_list = self._query_ast._projection(obj)
return BigDataFrame(self._ic, SelectStmt(select_list, table_ref))
else:
# single object, possibly a slice; wrap in list and get projection
return self[[obj]]
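    # Illustrative usage (not in the original source), following the docstring
    # above and assuming `bdf` is a BigDataFrame and `pred` is an Expr filter:
    #   bdf['col_a']              -> projection of a single column
    #   bdf[['col_a', 'col_b']]   -> projection of several columns
    #   bdf[pred, ['col_a']]      -> WHERE-style filter plus projection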
def join(self, other, on=None, how='inner', hint=None):
"""Join this BDF to another one.
`on` is `None`, `string`, `Expr`, or `list[string]`
"""
left = InlineView(self._query_ast.to_sql(), 'left_tbl')
right = InlineView(other._query_ast.to_sql(), 'right_tbl')
# SELECT left.*, right.*
select_list = [SelectItem(table_name=TableName(left.name)),
SelectItem(table_name=TableName(right.name))]
table_ref = JoinTableRef(left, right, on=on, op=how, hint=hint)
ast = SelectStmt(select_list, table_ref)
return BigDataFrame(self._ic, ast)
def group_by(self, by):
"""Group the BDF
`by` is `string`, `Expr`, or `list/tuple[string/Expr]`
"""
if not isinstance(by, (tuple, list)):
by = (by,)
if not all([isinstance(e, (six.string_types, Expr)) for e in by]):
raise ValueError("must supply only strings or Exprs")
by = tuple([e if isinstance(e, Expr) else Literal(e) for e in by])
table_ref = InlineView(self._query_ast.to_sql(), 'inner_tbl')
# invalid AST; to be used by GroupBy
incomplete_ast = SelectStmt([], table_ref, group_by=by)
return GroupBy(self._ic, incomplete_ast)
def concat(self, other):
"""Concatenate BDFs using a UNION statement.
Schemas must be compatible.
"""
if not isinstance(other, BigDataFrame):
            raise ValueError("other must be a BigDataFrame object")
if self.schema != other.schema:
raise ValueError("schema mismatch")
ast = UnionStmt([self._query_ast, other._query_ast])
return BigDataFrame(self._ic, ast)
def one_hot_categoricals(self, categoricals, prefix=None, dummy_na=False):
"""Convert categorical columns to one-hot encoding.
categoricals is an iterable of column names that should be treated as
categorical variables
"""
# TODO
raise NotImplementedError
# unique_values = {}
# for col in categoricals:
# distinct_query = "SELECT DISTINCT %s FROM %s" % (
# col, bdf.to_sql())
# self._cursor.execute(distinct_query)
# unique_values[col] = self._cursor.fetchall()
def _store(self, path=None, table_name=None, file_format='TEXTFILE',
field_terminator='\t', line_terminator='\n', escape_char='\\',
overwrite=False):
if overwrite:
self._cursor.execute(
"DROP TABLE IF EXISTS %s" % table_name.to_sql())
create_stmt = _create_table_as_select(
table_name, path=path, file_format=file_format,
field_terminator=field_terminator, line_terminator=line_terminator,
escape_char=escape_char)
query = create_stmt + self.to_sql()
self._cursor.execute(query)
return from_sql_table(self._ic, table_name.to_sql())
def store(self, path=None, table=None, file_format='TEXTFILE',
field_terminator='\t', line_terminator='\n', escape_char='\\',
overwrite=False):
"""Materialize the results and stores them in HFDS. Functions as an EXTERNAL table.
Implemented through a `CREATE TABLE AS SELECT`.
"""
temp_table = _random_id('tmp_table_', 8)
if table is None:
table = "%s.%s" % (self._temp_db, temp_table)
if path is None:
path = os.path.join(self._temp_dir, temp_table)
table_name = _to_TableName(table)
return self._store(path=path, table_name=table_name, file_format=file_format, field_terminator=field_terminator,
line_terminator=line_terminator, escape_char=escape_char, overwrite=overwrite)
def store_managed(self, table, file_format='PARQUET', field_terminator='\t', line_terminator='\n', escape_char='\\',
overwrite=False):
"""Materialize the results and stores them in HDFS as an impala managed table.
Implemented through a `CREATE TABLE AS SELECT`.
"""
table_name = _to_TableName(table)
return self._store(path=None, table_name=table_name, file_format=file_format, field_terminator=field_terminator,
line_terminator=line_terminator, escape_char=escape_char, overwrite=overwrite)
def save_view(self, name, overwrite=False):
"""Create a named view representing this BDF for later reference"""
# TODO: is this fn useful?
table_name = _to_TableName(name)
if overwrite:
self._ic._cursor.execute(
'DROP VIEW IF EXISTS %s' % table_name.to_sql())
sql = 'CREATE VIEW %s AS %s' % (table_name.to_sql(),
self._query_ast.to_sql())
self._ic._cursor.execute(sql)
return from_sql_table(self._ic, table_name.to_sql())
def __iter__(self):
"""Return an iterator object to iterate over rows locally"""
self._ic._cursor.execute(self._query_ast.to_sql())
return self._ic._cursor.__iter__()
def take(self, n):
"""Return `n` rows as a pandas `DataFrame`
        The rows are distributed and have no notion of order, so the result is
        not guaranteed to be reproducible.
"""
alias = _random_id('inline_', 4)
table_ref = InlineView(self._query_ast.to_sql(), alias)
# SELECT alias.*
select_list = [SelectItem(table_name=TableName(table_ref.name))]
limit_elt = LimitElement(Literal(n), None)
ast = SelectStmt(select_list, table_ref, limit=limit_elt)
bdf = BigDataFrame(self._ic, ast)
return as_pandas(bdf.__iter__())
def collect(self):
"""Return the BDF data to the client as a pandas DataFrame"""
return as_pandas(self.__iter__())
def count(self):
count_query = ('SELECT COUNT(*) FROM (%s) AS count_tbl' %
self._query_ast.to_sql())
self._ic._cursor.execute(count_query)
return self._ic._cursor.fetchall()[0][0]
class GroupBy(object):
def __init__(self, ic, grouped_ast):
# NOTE: grouped_ast._select_list gets ignored
if grouped_ast._group_by is None:
raise ValueError("GroupBy requires an AST with a valid _group_by")
self._ic = ic
self._grouped_ast = grouped_ast
@property
def groups(self):
ast = copy(self._grouped_ast)
select_list = [SelectItem(expr=e) for e in self._grouped_ast._group_by]
ast._select_list = tuple(select_list)
return BigDataFrame(self._ic, ast)
def __getitem__(self, obj):
"""Expression evaluation against groups.
Given a single object or list, the GroupBy will interpret it as a set
of SELECT expressions to evaluate in the context of the GROUP BY.
Given a tuple of length 2, the first element will be interpreted for
group selection (i.e., a HAVING clause), while the second element will
be interpreted as a set of expressions to evaluate against the groups.
"""
ast = copy(self._grouped_ast)
if isinstance(obj, tuple) and len(obj) == 2:
if not isinstance(obj[0], Expr):
raise ValueError("The group filter (obj[0]) must be Expr type")
ast._having = obj[0]
obj = obj[1]
# obj is now the SELECT portion
if not isinstance(obj, (list, tuple)):
obj = [obj]
select_list = []
for elt in obj:
if isinstance(elt, SelectItem):
select_list.append(elt)
elif isinstance(elt, six.string_types):
select_list.append(SelectItem(expr=Literal(elt)))
elif isinstance(elt, Expr):
select_list.append(SelectItem(expr=elt))
ast._select_list = select_list
return BigDataFrame(self._ic, ast)
|
apache-2.0
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/core/datetools.py
|
9
|
1174
|
"""A collection of random tools for dealing with dates in Python"""
from pandas.tseries.tools import *
from pandas.tseries.offsets import *
from pandas.tseries.frequencies import *
day = DateOffset()
bday = BDay()
businessDay = bday
try:
cday = CDay()
customBusinessDay = CustomBusinessDay()
customBusinessMonthEnd = CBMonthEnd()
customBusinessMonthBegin = CBMonthBegin()
except NotImplementedError:
cday = None
customBusinessDay = None
customBusinessMonthEnd = None
customBusinessMonthBegin = None
monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
bmonthBegin = BMonthBegin()
cbmonthEnd = customBusinessMonthEnd
cbmonthBegin = customBusinessMonthBegin
bquarterEnd = BQuarterEnd()
quarterEnd = QuarterEnd()
byearEnd = BYearEnd()
week = Week()
# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
thisQuarterEnd = QuarterEnd(0)
# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
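# Illustrative sketch (not part of the original module): rolling a date with
# the offset aliases defined above. Wrapped in a function so importing this
# module is unchanged; the example date is arbitrary.
def _example_offsets():
    from datetime import datetime
    d = datetime(2014, 7, 4)                   # a Friday
    return {
        'next business day': d + bday,         # 2014-07-07
        'this month end': d + thisMonthEnd,    # 2014-07-31
        'next quarter end': d + quarterEnd,    # 2014-09-30
    }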
|
artistic-2.0
|
codrut3/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/io_test.py
|
137
|
5063
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
henridwyer/scikit-learn
|
examples/neighbors/plot_species_kde.py
|
282
|
4059
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/frame/methods/test_asfreq.py
|
2
|
1970
|
from datetime import datetime
import numpy as np
from pandas import DataFrame, DatetimeIndex, Series, date_range
import pandas._testing as tm
from pandas.tseries import offsets
class TestAsFreq:
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
# test does not blow up on length-0 DataFrame
zero_length = datetime_frame.reindex([])
result = zero_length.asfreq("BM")
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = date_range("1/1/2016", periods=10, freq="2S")
ts = Series(np.arange(len(rng)), index=rng)
df = DataFrame({"one": ts})
# insert pre-existing missing value
df.loc["2016-01-01 00:00:08", "one"] = None
actual_df = df.asfreq(freq="1S", fill_value=9.0)
expected_df = df.asfreq(freq="1S").fillna(9.0)
expected_df.loc["2016-01-01 00:00:08", "one"] = None
tm.assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
|
bsd-3-clause
|
rcomer/iris
|
lib/iris/tests/test_pandas.py
|
5
|
18054
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import copy
import datetime
import unittest
import cf_units
import cftime
import matplotlib.units
import numpy as np
# Importing pandas has the side-effect of messing with the formatters
# used by matplotlib for handling dates.
default_units_registry = copy.copy(matplotlib.units.registry)
try:
import pandas
except ImportError:
# Disable all these tests if pandas is not installed.
pandas = None
matplotlib.units.registry = default_units_registry
skip_pandas = unittest.skipIf(
pandas is None, 'Test(s) require "pandas", ' "which is not available."
)
if pandas is not None:
from iris.coords import DimCoord
from iris.cube import Cube
import iris.pandas
@skip_pandas
class TestAsSeries(tests.IrisTest):
"""Test conversion of 1D cubes to Pandas using as_series()"""
def test_no_dim_coord(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
expected_index = np.array([0, 1, 2, 3, 4])
self.assertArrayEqual(series, cube.data)
self.assertArrayEqual(series.index, expected_index)
def test_simple(self):
cube = Cube(np.array([0, 1, 2, 3, 4.4]), long_name="foo")
dim_coord = DimCoord([5, 6, 7, 8, 9], long_name="bar")
cube.add_dim_coord(dim_coord, 0)
expected_index = np.array([5, 6, 7, 8, 9])
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertArrayEqual(series.index, expected_index)
def test_masked(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4.4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data.astype("f").filled(np.nan))
def test_time_gregorian(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_coord = DimCoord(
[0, 100.1, 200.2, 300.3, 400.4],
long_name="time",
units="days since 2000-01-01 00:00",
)
cube.add_dim_coord(time_coord, 0)
expected_index = [
datetime.datetime(2000, 1, 1, 0, 0),
datetime.datetime(2000, 4, 10, 2, 24),
datetime.datetime(2000, 7, 19, 4, 48),
datetime.datetime(2000, 10, 27, 7, 12),
datetime.datetime(2001, 2, 4, 9, 36),
]
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertListEqual(list(series.index), expected_index)
def test_time_360(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_unit = cf_units.Unit(
"days since 2000-01-01 00:00", calendar=cf_units.CALENDAR_360_DAY
)
time_coord = DimCoord(
[0, 100.1, 200.2, 300.3, 400.4], long_name="time", units=time_unit
)
cube.add_dim_coord(time_coord, 0)
expected_index = [
cftime.Datetime360Day(2000, 1, 1, 0, 0),
cftime.Datetime360Day(2000, 4, 11, 2, 24),
cftime.Datetime360Day(2000, 7, 21, 4, 48),
cftime.Datetime360Day(2000, 11, 1, 7, 12),
cftime.Datetime360Day(2001, 2, 11, 9, 36),
]
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertArrayEqual(series.index, expected_index)
def test_copy_true(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_int32_false(self):
cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int32), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_int64_false(self):
cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int32), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_float_false(self):
cube = Cube(np.array([0, 1, 2, 3.3, 4]), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
_ = iris.pandas.as_series(cube, copy=False)
@skip_pandas
class TestAsDataFrame(tests.IrisTest):
"""Test conversion of 2D cubes to Pandas using as_data_frame()"""
def test_no_dim_coords(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
expected_index = [0, 1]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_no_x_coord(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
y_coord = DimCoord([10, 11], long_name="bar")
cube.add_dim_coord(y_coord, 0)
expected_index = [10, 11]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_no_y_coord(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
cube.add_dim_coord(x_coord, 1)
expected_index = [0, 1]
expected_columns = [10, 11, 12, 13, 14]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_simple(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
y_coord = DimCoord([15, 16], long_name="milk")
cube.add_dim_coord(x_coord, 1)
cube.add_dim_coord(y_coord, 0)
expected_index = [15, 16]
expected_columns = [10, 11, 12, 13, 14]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_masked(self):
data = np.ma.MaskedArray(
[[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]],
)
cube = Cube(data, long_name="foo")
expected_index = [0, 1]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data.astype("f").filled(np.nan))
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_time_gregorian(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="ts"
)
day_offsets = [0, 100.1, 200.2, 300.3, 400.4]
time_coord = DimCoord(
day_offsets, long_name="time", units="days since 2000-01-01 00:00"
)
cube.add_dim_coord(time_coord, 1)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
nanoseconds_per_day = 24 * 60 * 60 * 1000000000
days_to_2000 = 365 * 30 + 7
# pandas Timestamp class cannot handle floats in pandas <v0.12
timestamps = [
pandas.Timestamp(
int(nanoseconds_per_day * (days_to_2000 + day_offset))
)
for day_offset in day_offsets
]
self.assertTrue(all(data_frame.columns == timestamps))
self.assertTrue(all(data_frame.index == [0, 1]))
def test_time_360(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="ts"
)
time_unit = cf_units.Unit(
"days since 2000-01-01 00:00", calendar=cf_units.CALENDAR_360_DAY
)
time_coord = DimCoord(
[100.1, 200.2], long_name="time", units=time_unit
)
cube.add_dim_coord(time_coord, 0)
expected_index = [
cftime.Datetime360Day(2000, 4, 11, 2, 24),
cftime.Datetime360Day(2000, 7, 21, 4, 48),
]
expected_columns = [0, 1, 2, 3, 4]
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertArrayEqual(data_frame.index, expected_index)
self.assertArrayEqual(data_frame.columns, expected_columns)
def test_copy_true(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]), long_name="foo"
)
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_int32_false(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int32),
long_name="foo",
)
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_int64_false(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int64),
long_name="foo",
)
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_float_false(self):
cube = Cube(
np.array([[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]]), long_name="foo"
)
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]],
)
cube = Cube(data, long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]],
)
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
_ = iris.pandas.as_data_frame(cube, copy=False)
def test_copy_false_with_cube_view(self):
data = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = Cube(data[:], long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
@skip_pandas
class TestSeriesAsCube(tests.IrisTest):
def test_series_simple(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(("pandas", "as_cube", "series_simple.cml")),
)
def test_series_object(self):
class Thing:
def __repr__(self):
return "A Thing"
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[Thing(), Thing(), Thing(), Thing(), Thing()],
)
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(("pandas", "as_cube", "series_object.cml")),
)
def test_series_masked(self):
series = pandas.Series(
[0, float("nan"), 2, np.nan, 4], index=[5, 6, 7, 8, 9]
)
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(("pandas", "as_cube", "series_masked.cml")),
)
def test_series_datetime_gregorian(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[
datetime.datetime(2001, 1, 1, 1, 1, 1),
datetime.datetime(2002, 2, 2, 2, 2, 2),
datetime.datetime(2003, 3, 3, 3, 3, 3),
datetime.datetime(2004, 4, 4, 4, 4, 4),
datetime.datetime(2005, 5, 5, 5, 5, 5),
],
)
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(
("pandas", "as_cube", "series_datetime_gregorian.cml")
),
)
def test_series_cftime_360(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[
cftime.datetime(2001, 1, 1, 1, 1, 1),
cftime.datetime(2002, 2, 2, 2, 2, 2),
cftime.datetime(2003, 3, 3, 3, 3, 3),
cftime.datetime(2004, 4, 4, 4, 4, 4),
cftime.datetime(2005, 5, 5, 5, 5, 5),
],
)
self.assertCML(
iris.pandas.as_cube(
series, calendars={0: cf_units.CALENDAR_360_DAY}
),
tests.get_result_path(
("pandas", "as_cube", "series_netcdfimte_360.cml")
),
)
def test_copy_true(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series)
cube.data[0] = 99
self.assertEqual(series[5], 0)
def test_copy_false(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series, copy=False)
cube.data[0] = 99
self.assertEqual(series[5], 99)
@skip_pandas
class TestDataFrameAsCube(tests.IrisTest):
def test_data_frame_simple(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_simple.cml")
),
)
def test_data_frame_nonotonic(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[10, 10],
columns=[12, 12, 14, 15, 16],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_nonotonic.cml")
),
)
def test_data_frame_masked(self):
data_frame = pandas.DataFrame(
[[0, float("nan"), 2, 3, 4], [5, 6, 7, np.nan, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_masked.cml")
),
)
def test_data_frame_multidim(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[0, 1],
columns=["col_1", "col_2", "col_3", "col_4", "col_5"],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_multidim.cml")
),
)
def test_data_frame_cftime_360(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[
cftime.datetime(2001, 1, 1, 1, 1, 1),
cftime.datetime(2002, 2, 2, 2, 2, 2),
],
columns=[10, 11, 12, 13, 14],
)
self.assertCML(
iris.pandas.as_cube(
data_frame, calendars={0: cf_units.CALENDAR_360_DAY}
),
tests.get_result_path(
("pandas", "as_cube", "data_frame_netcdftime_360.cml")
),
)
def test_data_frame_datetime_gregorian(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
index=[
datetime.datetime(2001, 1, 1, 1, 1, 1),
datetime.datetime(2002, 2, 2, 2, 2, 2),
],
columns=[10, 11, 12, 13, 14],
)
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(
("pandas", "as_cube", "data_frame_datetime_gregorian.cml")
),
)
def test_copy_true(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 0)
def test_copy_false(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame, copy=False)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 99)
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
Sapphirine/Human-Activity-Monitoring-and-Prediction
|
analysis.py
|
1
|
6718
|
__author__ = 'Chao'
import numpy as np
from sklearn import svm, cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
activity_label = {'1': 'WALKING',
'2': 'WALKING_UPSTAIRS',
'3': 'WALKING_DOWNSTAIRS',
'4': 'SITTING',
'5': 'STANDING',
'6': 'LAYING'}
# ############################# Open data set ###############################
X = []
y = []
X_fin = []
y_fin = []
print "Opening dataset..."
try:
with open("X_train.txt", 'rU') as f:
res = list(f)
for line in res:
line.strip("\n")
pair = line.split(" ")
while pair.__contains__(""):
pair.remove("")
for i in xrange(pair.__len__()):
pair[i] = float(pair[i])
X.append(pair)
f.close()
with open("y_train.txt", 'rU') as f:
res = list(f)
for line in res:
y.append(int(line.strip("\n")[0]))
f.close()
except:
print "Error in reading the train set file."
exit()
try:
with open("X_test.txt", 'rU') as f:
res = list(f)
for line in res:
line.strip("\n")
pair = line.split(" ")
while pair.__contains__(""):
pair.remove("")
for i in xrange(pair.__len__()):
pair[i] = float(pair[i])
X_fin.append(pair)
f.close()
with open("y_test.txt", 'rU') as f:
res = list(f)
for line in res:
y_fin.append(int(line.strip("\n")[0]))
f.close()
except:
print "Error in reading the train set file."
exit()
print "Dataset opened."
X = np.array(X)
y = np.array(y)
###### Separate data set into 70% training set and 30% test set
print "Separating data into 70% training set & 30% test set..."
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3)
print "Dataset separated."
###### Get best parameters ######
############################### Kernel=Linear ###############################
print "######## SVM, Kernel = Linear #########"
#C_linear = [0.1, 1, 10, 100]
C_linear = [3]
result_linear = []
print "C value chosen from: ", C_linear
print "Calculating accuracy with K-fold..."
for C in C_linear:
svc_linear = svm.SVC(kernel='linear', C=C)
scores = cross_validation.cross_val_score(svc_linear, X_train, y_train, scoring='accuracy', cv=6)
result_linear.append(scores.mean())
print "result:", result_linear
#Results with different C values are equal, so C=3 is chosen directly as the best parameter.
best_param_linear = {"C": 3}
#linear_test_score = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X_test, y_test).score(X_test, y_test)
#rbf_test_score = svm.SVC(kernel='rbf', C=best_param_rbf.get("C"), gamma=best_param_rbf.get("gamma")).fit(X_test, y_test).score(X_test, y_test)
#poly_test_score = svm.SVC(kernel='poly', C=best_param_poly.get("C"), degree=best_param_poly.get("degree")).fit(X_test, y_test).score(X_test, y_test)
linear_test = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(len(X_fin)):
count2 += 1
a = linear_test.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
#print "Linear Kernel test score: ", linear_test_score
#print "RBF Kernel test score: ", rbf_test_score
#print "Poly Kernel test score: ", poly_test_score
################################### Random Forests ####################################
print "##### Random Forest ######"
n_estimators_list = range(1, 16, 1)
result_random_forests = []
max_score_rf = float("-inf")
best_param_rf = None
for n_estimators in n_estimators_list:
print "Testing n_estimators = ", n_estimators
rf_clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=None, min_samples_split=1, random_state=0)
scores = cross_validation.cross_val_score(rf_clf, X_train, y_train, scoring="accuracy", cv=6)
result_random_forests.append(scores.mean())
if scores.mean() > max_score_rf:
max_score_rf = scores.mean()
best_param_rf = {"n_estimators": n_estimators}
print "number of trees: ", n_estimators_list
print "results: ", result_random_forests
print "best accuracy: ", max_score_rf
print "best parameter: ", best_param_rf
rf_clf_test_score = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None,
min_samples_split=1, random_state=0).fit(X_test, y_test).score(X_test,
y_test)
print "Test set accuracy: ", rf_clf_test_score
rf_clf = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None, min_samples_split=1,
random_state=0).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(len(X_fin)):
count2 += 1
a = rf_clf.predict(X_fin[i])
b = y_fin[i]
print "+ ", a[0],
print "- ", b
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
################################### K Nearest Neighbors ####################################
print "##### K Nearest Neighbors ######"
n_neighbors_list = range(1, 6, 1)
result_n_neighbors = []
max_score_knn = float("-inf")
best_param_knn = None
for n_neighbors in n_neighbors_list:
print "Testing n_neighbors = ", n_neighbors
neigh = KNeighborsClassifier(n_neighbors=n_neighbors)
scores = cross_validation.cross_val_score(neigh, X_train, y_train, scoring="accuracy", cv=6)
result_n_neighbors.append(scores.mean())
if scores.mean() > max_score_knn:
max_score_knn = scores.mean()
best_param_knn = {"n_neighbors": n_neighbors}
print "number of neighbors: ", n_neighbors_list
print "results: ", result_n_neighbors
print "best accuracy: ", max_score_knn
print "best parameter: ", best_param_knn
neigh_test_score = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X_test, y_test).score(X_test, y_test)
print "Test set accuracy: ", neigh_test_score
neigh = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(len(X_fin)):
count2 += 1
a = neigh.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
|
apache-2.0
|
trendelkampschroer/PyEMMA
|
pyemma/plots/timescales.py
|
1
|
5594
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from six.moves import range
__author__ = 'noe'
import numpy as _np
import matplotlib.pylab as _plt
def plot_implied_timescales(ITS, ax=None, outfile=None, show_mle=True, show_mean=True,
xlog=False, ylog=True, confidence=0.95, refs=None,
units='steps', dt=1., **kwargs):
r""" Generate a pretty implied timescale plot
Parameters
----------
    ITS : implied timescales object.
        Object whose data will be plotted. Must provide the functions get_timescales() and get_timescales(i),
        where i is a process index, as well as the property samples_available.
ax : matplotlib Axes object, optional, default = None
the axes to plot to. When set to None the default Axes object will be used.
outfile : str, optional, default = None
output file to write the figure to. When not given, the plot will be displayed
show_mean : bool, default = True
Line for mean value will be shown, if available
show_mle : bool, default = True
Line for maximum likelihood estimate will be shown
xlog : bool, optional, default = False
Iff true, the x-Axis is logarithmized
ylog : bool, optional, default = True
Iff true, the y-Axis is logarithmized
confidence : float, optional, default = 0.95
The confidence interval for plotting error bars (if available)
refs : ndarray((m), dtype=float), optional, default = None
Reference (exact solution or other reference) timescales if known. The number of timescales must match those
in the ITS object
units: str, optional, default = 'steps'
Affects the labeling of the axes. Used with :py:obj:`dt`, allows for changing the physical units of the axes.
Accepts simple LaTeX math strings, eg. '$\mu$s'
dt: float, optional, default = 1.0
        Physical time between frames, expressed in the units given in :py:obj:`units`. E.g., if you know that each
frame corresponds to .010 ns, you can use the combination of parameters :py:obj:`dt` =0.01,
:py:obj:`units` ='ns' to display the implied timescales in ns (instead of frames)
    **kwargs: Will be passed to pyplot.plot when plotting the MLE datapoints (not the bootstrapped means).
        See the pyplot documentation for more options. Most useful are line properties like `marker='o'` and/or `markersize=5`.
Returns
-------
ax : Axes object containing the plot
"""
# check input
if (ax is None):
ax = _plt.gca()
colors = ['blue','red','green','cyan','purple','orange','violet']
lags = ITS.lagtimes
xmax = _np.max(lags)
#ymin = min(_np.min(lags), _np.min(ITS.get_timescales()))
#ymax = 1.5*_np.min(ITS.get_timescales())
for i in range(ITS.number_of_timescales):
# plot estimate
if show_mle:
ax.plot(lags*dt, ITS.get_timescales(process=i)*dt, color=colors[i % len(colors)], **kwargs)
# sample available?
if (ITS.samples_available):# and ITS.sample_number_of_timescales > i):
# plot sample mean
if show_mean:
ax.plot(lags*dt, ITS.get_sample_mean(process=i)*dt, marker='o',
color=colors[i % len(colors)], linestyle='dashed')
(lconf, rconf) = ITS.get_sample_conf(confidence, i)
ax.fill_between(lags*dt, lconf*dt, rconf*dt, alpha=0.2, color=colors[i % len(colors)])
# reference available?
if (refs is not None):
tref = refs[i]
            ax.plot(_np.array([0, min(tref, xmax)])*dt, _np.array([tref, tref])*dt, color='black', linewidth=1)
# cutoff
ax.plot(lags*dt, lags*dt, linewidth=2, color='black')
ax.set_xlim([1*dt,xmax*dt])
#ax.set_ylim([ymin,ymax])
ax.fill_between(lags*dt, ax.get_ylim()[0]*_np.ones(len(lags))*dt, lags*dt, alpha=0.5, color='grey')
# formatting
ax.set_xlabel('lag time / %s'%units)
ax.set_ylabel('timescale / %s'%units)
if (xlog):
ax.set_xscale('log')
if (ylog):
ax.set_yscale('log')
# show or save
# if outfile is None:
# _plt.show()
if outfile is not None:
_plt.savefig(outfile)
return ax
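if __name__ == "__main__":
    # -----------------------------------------------------------------------
    # Editor's hedged usage sketch (not part of the original module): a tiny
    # mock object exposing only the attributes plot_implied_timescales()
    # reads (lagtimes, number_of_timescales, samples_available and
    # get_timescales), so the function can be exercised without estimating a
    # real MSM. The timescale values below are arbitrary assumptions.
    # -----------------------------------------------------------------------
    class _MockITS(object):
        lagtimes = _np.array([1, 2, 5, 10, 20, 50])
        number_of_timescales = 2
        samples_available = False
        def get_timescales(self, process=None):
            # two artificial, lag-independent timescales, one per process index
            base = _np.array([100.0, 40.0])
            if process is None:
                return base
            return base[process] * _np.ones(len(self.lagtimes))
    ax = plot_implied_timescales(_MockITS(), dt=0.1, units='ns', marker='o')
    _plt.show()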
|
bsd-2-clause
|
iismd17/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
242
|
5885
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
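# ---------------------------------------------------------------------------
# Editor's hedged follow-up sketch (not part of the original example): the
# docstring above motivates picking ``n_clusters`` via silhouette analysis;
# this loop simply selects the candidate with the highest average silhouette
# score for the same data, complementing the visual inspection of the plots.
# ---------------------------------------------------------------------------
avg_scores = {}
for n_clusters in range_n_clusters:
    labels = KMeans(n_clusters=n_clusters, random_state=10).fit_predict(X)
    avg_scores[n_clusters] = silhouette_score(X, labels)
best_n_clusters = max(avg_scores, key=avg_scores.get)
print("Average silhouette scores per n_clusters:", avg_scores)
print("n_clusters with the highest average silhouette score:", best_n_clusters)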
|
bsd-3-clause
|
jaytlennon/Emergence
|
figure_code/MacroecologyPatterns/TaylorsLaw.py
|
8
|
1412
|
from __future__ import division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
from scipy import stats
mydir = os.path.expanduser('~/GitHub/Emergence')
tools = os.path.expanduser(mydir + "/tools")
_lw = 2
sz = 20
df = pd.read_csv(mydir + '/results/simulated_data/SimData.csv')
df2 = pd.DataFrame({'length' : df['length'].groupby(df['sim']).mean()})
df2['NS'] = np.log10(df['avg.pop.size'].groupby(df['sim']).mean())
df2['var'] = np.log10(df['pop.var'].groupby(df['sim']).mean())
df2 = df2[df2['var'] > 1]
#### plot figure ###############################################################
fs = 14
fig = plt.figure(figsize=(3, 2))
fig.add_subplot(1, 1, 1)
Nlist = df2['NS'].tolist()
Vlist = df2['var'].tolist()
plt.scatter(Nlist, Vlist, lw=_lw, color='0.7', s = sz)
m, b, r, p, std_err = stats.linregress(Nlist, Vlist)
Nlist = np.array(Nlist)
plt.plot(Nlist, m*Nlist + b, '-', color='k', label='$z$ = '+str(round(m,2)), lw=_lw)
xlab = r"$log_{10}$"+'(mean)'
ylab = r"$log_{10}$"+'(variance)'
plt.xlabel(xlab, fontsize=fs)
plt.tick_params(axis='both', labelsize=fs-3)
plt.ylabel(ylab, fontsize=fs)
plt.legend(loc='best', fontsize=fs-3, frameon=False)
#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/TaylorsLaw.png', dpi=200, bbox_inches = "tight")
plt.close()
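#### Editor's hedged sanity check (not in the original figure code) ###########
# Quick verification of the fitting procedure above: synthetic populations are
# generated so that variance ~ mean**z_true, the same log10/linregress fit is
# applied, and the recovered slope should be close to z_true. The synthetic
# parameters (z_true, noise level, number of points) are arbitrary assumptions.
z_true = 1.7
rng = np.random.RandomState(0)
means = np.logspace(1, 4, 30)
variances = 0.5 * means ** z_true * np.exp(rng.normal(0.0, 0.05, means.size))
z_fit, b0, r0, p0, se0 = stats.linregress(np.log10(means), np.log10(variances))
print('true z = %.2f, fitted z = %.2f' % (z_true, z_fit))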
|
mit
|
nicolas998/Op_Radar
|
06_Codigos/Cron_Actualiza_Obs.py
|
1
|
6206
|
#!/usr/bin/env python
import numpy as np
import pickle
import pandas as pnd
import os
import datetime as dt
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
print '###################################################### Actualiza caudales historicos observados ####################################'
#-------------------------------------------------------------------
#CRON VARIABLES
#-------------------------------------------------------------------
rutaEst = '/home/nicolas/Operacional/Op_Radar/03_Simulaciones/03_Stream_Observed/Comparison_Stations.txt'
rutaFile = '/home/nicolas/Operacional/Op_Radar/03_Simulaciones/03_Stream_Observed/'
rutaTemp = '/home/nicolas/Operacional/Op_Radar/03_Simulaciones/03_Stream_Observed/Qobs_temp.qobs'
# replace: indicates whether the whole queried historical streamflow series is replaced or not; by default it is not replaced
replace = False
# DeltaAtras: number of steps in minutes to go back for the query
DeltaAtras = 60
#-------------------------------------------------------------------
#READ PARAMETERS OF THE OBSERVED STREAMFLOWS TO BE QUERIED
#-------------------------------------------------------------------
#Read the file with the information of the streamflow stations.
f = open(rutaEst,'r')
L = f.readlines()
f.close()
#Put the station characteristics into a dictionary
DictCaudal = {}
for j in L[7:]:
Data = j.split(',')
if Data[0].startswith('#') == False:
DictCaudal.update({Data[0]:{}})
DictCaudal[Data[0]].update({'ID':int(Data[1])})
DictCaudal[Data[0]].update({'Nodo':int(Data[2])})
DictCaudal[Data[0]].update({'Coef':float(Data[3])})
DictCaudal[Data[0]].update({'Expo':float(Data[4])})
#-------------------------------------------------------------------
#Dates for the query
#-------------------------------------------------------------------
# Get the datetime
fecha_1 = dt.datetime.now() - dt.timedelta(minutes = DeltaAtras)
#If the minute is not a multiple of five, make it equal to a multiple of five
if fecha_1.minute%5 != 0:
minute = int(np.floor(fecha_1.minute/10.0) * 10)
fecha_1 = dt.datetime(fecha_1.year, fecha_1.month, fecha_1.day, fecha_1.hour, minute)
fecha_2 = dt.datetime.now()# - dt.timedelta(minutes = 50)
if fecha_2.minute%5 != 0:
minute = int(np.floor(fecha_2.minute/10.0) * 10)
fecha_2 = dt.datetime(fecha_2.year, fecha_2.month, fecha_2.day, fecha_2.hour, minute)
# Convert it to text
fecha1 = fecha_1.strftime('%Y-%m-%d')
fecha2 = fecha_2.strftime('%Y-%m-%d')
hora_1 = fecha_1.strftime('%H:%M')
hora_2 = fecha_2.strftime('%H:%M')
#-------------------------------------------------------------------
#Query and check whether an update is needed
#-------------------------------------------------------------------
#List of current historical observed streamflow files
L = os.listdir(rutaFile)
L = [i for i in L if i.endswith('.qobs')]
#Iterate over the stations with streamflow records and query them
for k in DictCaudal.keys():
    #To query one station
idEst = str(DictCaudal[k]['ID'])
c = str(DictCaudal[k]['Coef'])
e = str(DictCaudal[k]['Expo'])
    #Name of the historical streamflow file
nombre = 'Qobs_'+k+'.qobs'
    # Check whether records already exist in the folder for that station
Existe = False
try:
pos = L.index(nombre)
Existe = True
if replace:
rutaFin = rutaFile + nombre
else:
rutaFin = rutaTemp
except:
rutaFin = rutaFile + nombre
print rutaFin
    #Build the query command and execute it
comando = ('/home/nicolas/Operacional/Op_Radar/06_Codigos/Consulta_Caudal.py '+fecha1+' '+fecha2+' '
+rutaFin+' '+idEst+' -t 5min -1 '+hora_1+' -2 '+hora_2
+' -c '+c+' -e '+e)
print comando
os.system(comando)
    #If it already exists, append the new query to it
if Existe:
Qhist = pnd.read_msgpack(rutaFile + L[pos])
Qactual = pnd.read_msgpack(rutaTemp)
        #Join both records
D = pnd.date_range(Qhist.index[0], Qactual.index[-1], freq='5min')
Qjoin = pnd.Series(np.zeros(D.shape), D)
Qjoin[Qhist.index] = Qhist
Qjoin[Qactual.index] = Qactual
        #Save the historical file
Qjoin.to_msgpack(rutaFile + L[pos])
print '################################################################################################'
print 'consultado: '+k
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
print '###################################################### Borra figuras de caudales simulados #########################################'
rutaQsim = '/media/nicolas/discoGrande/01_SIATA/ResultadosOperacion/Ope_Barbosa_Radar/Qsim/'
#List files and get their creation dates
l = os.listdir(rutaQsim)
fechas = lambda x: dt.datetime.fromtimestamp(os.path.getmtime(x))
l = [rutaQsim + i for i in l]
d = map(fechas, l)
#Sort and prepare files for deletion
d2 = [[i,j] for i,j in zip(d,l)]
d2.sort()
d3 = d2[:-50] #number of files to keep: 50.
#Delete files
comando = ['rm '+i[1] for i in d3]
map(os.system, comando)
|
gpl-3.0
|
vortex-ape/scikit-learn
|
sklearn/neighbors/approximate.py
|
4
|
23946
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
"""
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
"""
self.fit(X)
return self.transform(X)
def transform(self, X):
"""
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
"""
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint
Parameters
----------
n_components : int or 'auto', optional (default = 32)
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
def __init__(self,
n_components=32,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42) # doctest: +SKIP
>>> lshf.fit(X_train) # doctest: +SKIP
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
... # doctest: +SKIP
>>> distances # doctest: +SKIP
array([[0.069..., 0.149...],
[0.229..., 0.481...],
[0.004..., 0.014...]])
>>> indices # doctest: +SKIP
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
warnings.warn("LSHForest has poor performance and has been deprecated "
"in 0.19. It will be removed in version 0.21.",
DeprecationWarning)
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # n_components = hash size and n_features = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
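if __name__ == "__main__":
    # -----------------------------------------------------------------------
    # Editor's hedged usage sketch (not part of the original module):
    # demonstrates radius_neighbors() and partial_fit() on random data.
    # Assumes a scikit-learn version where LSHForest is still present
    # (deprecated in 0.19, removed in 0.21); run as
    # ``python -m sklearn.neighbors.approximate`` so the relative imports
    # above resolve. Dataset sizes and the radius value are arbitrary.
    # -----------------------------------------------------------------------
    rng_demo = np.random.RandomState(0)
    X_index = rng_demo.rand(100, 10)
    X_query = rng_demo.rand(3, 10)
    lshf = LSHForest(n_estimators=10, n_candidates=30, random_state=42)
    lshf.fit(X_index)
    # Indices (and cosine distances) of approximate neighbors within radius 0.2
    dists, inds = lshf.radius_neighbors(X_query, radius=0.2)
    print("neighbors within radius per query:", [len(i) for i in inds])
    # Batched insertion of new points into the already-fitted forest
    lshf.partial_fit(rng_demo.rand(20, 10))
    print("indexed points after partial_fit:", lshf._fit_X.shape[0])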
|
bsd-3-clause
|
liuwenf/moose
|
modules/porous_flow/doc/tests/broadbridge_white.py
|
11
|
212116
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected_bw01_05():
# these data were generated by Mathematica
data = [
[0.392105901,-1.85E-16],
[0.385747539,0.018000654],
[0.379396059,0.035898933],
[0.373053935,0.053696124],
[0.366723642,0.071393526],
[0.360407654,0.088992456],
[0.354108438,0.106494241],
[0.347828456,0.123900221],
[0.341570154,0.141211746],
[0.335335963,0.158430173],
[0.329128294,0.175556871],
[0.322949536,0.192593214],
[0.31680205,0.209540582],
[0.310688166,0.226400361],
[0.304610181,0.24317394],
[0.298570355,0.259862713],
[0.292570907,0.276468074],
[0.286614011,0.292991418],
[0.280701797,0.309434141],
[0.274836344,0.325797639],
[0.269019676,0.342083305],
[0.263253764,0.358292529],
[0.257540521,0.374426699],
[0.251881798,0.390487198],
[0.246279384,0.406475404],
[0.240735004,0.422392689],
[0.235250313,0.438240419],
[0.229826899,0.454019952],
[0.22446628,0.469732639],
[0.219169902,0.48537982],
[0.213939136,0.500962828],
[0.208775279,0.516482986],
[0.203679553,0.531941605],
[0.198653105,0.547339986],
[0.193697003,0.562679417],
[0.188812238,0.577961176],
[0.183999725,0.593186528],
[0.179260299,0.608356722],
[0.17459472,0.623472996],
[0.17000367,0.638536575],
[0.165487751,0.653548668],
[0.161047494,0.668510469],
[0.15668335,0.683423158],
[0.152395697,0.698287899],
[0.148184839,0.713105842],
[0.144051007,0.727878119],
[0.139994363,0.742605846],
[0.136014994,0.757290125],
[0.132112924,0.77193204],
[0.128288106,0.786532658],
[0.12454043,0.80109303],
[0.120869721,0.815614189],
[0.117275744,0.830097153],
[0.113758203,0.844542919],
[0.110316745,0.858952472],
[0.106950961,0.873326776],
[0.103660388,0.887666778],
[0.100444512,0.901973409],
[0.097302771,0.916247582],
[0.094234553,0.930490194],
[0.091239204,0.944702121],
[0.088316027,0.958884227],
[0.085464282,0.973037353],
[0.082683195,0.987162329],
[0.079971952,1.001259963],
[0.077329709,1.015331048],
[0.074755588,1.029376361],
[0.072248683,1.04339666],
[0.069808061,1.057392689],
[0.067432762,1.071365174],
[0.065121807,1.085314824],
[0.062874193,1.099242333],
[0.060688899,1.113148379],
[0.058564889,1.127033624],
[0.056501108,1.140898714],
[0.054496493,1.154744279],
[0.052549967,1.168570935],
[0.050660443,1.182379282],
[0.048826829,1.196169905],
[0.047048024,1.209943375],
[0.045322925,1.223700248],
[0.043650425,1.237441065],
[0.042029414,1.251166354],
[0.040458786,1.26487663],
[0.038937431,1.278572391],
[0.037464248,1.292254125],
[0.036038133,1.305922305],
[0.034657992,1.319577393],
[0.033322736,1.333219834],
[0.032031281,1.346850066],
[0.030782554,1.36046851],
[0.029575489,1.374075579],
[0.028409031,1.38767167],
[0.027282135,1.401257172],
[0.026193768,1.414832461],
[0.025142909,1.428397902],
[0.02412855,1.441953849],
[0.023149696,1.455500646],
[0.022205366,1.469038626],
[0.021294595,1.482568112],
[0.02041643,1.496089418],
[0.019569936,1.509602846],
[0.018754192,1.52310869],
[0.017968295,1.536607235],
[0.017211355,1.550098756],
[0.016482501,1.56358352],
[0.015780879,1.577061785],
[0.01510565,1.590533801],
[0.014455994,1.603999808],
[0.013831106,1.61746004],
[0.013230201,1.630914723],
[0.012652509,1.644364075],
[0.012097278,1.657808306],
[0.011563774,1.671247619],
[0.011051278,1.684682212],
[0.01055909,1.698112273],
[0.010086527,1.711537986],
[0.009632923,1.724959528],
[0.009197629,1.738377068],
[0.008780011,1.75179077],
[0.008379454,1.765200794],
[0.007995358,1.778607292],
[0.00762714,1.792010411],
[0.007274231,1.805410292],
[0.006936081,1.818807072],
[0.006612154,1.832200882],
[0.006301929,1.84559185],
[0.0060049,1.858980096],
[0.005720578,1.872365737],
[0.005448485,1.885748888],
[0.005188162,1.899129655],
[0.00493916,1.912508142],
[0.004701046,1.925884451],
[0.0044734,1.939258677],
[0.004255816,1.952630913],
[0.0040479,1.966001247],
[0.003849271,1.979369765],
[0.003659562,1.992736548],
[0.003478415,2.006101675],
[0.003305488,2.019465222],
[0.003140446,2.03282726],
[0.002982971,2.046187858],
[0.00283275,2.059547084],
[0.002689485,2.072905001],
[0.002552888,2.08626167],
[0.002422679,2.099617149],
[0.00229859,2.112971494],
[0.002180363,2.126324759],
[0.002067747,2.139676996],
[0.001960503,2.153028253],
[0.001858399,2.166378577],
[0.001761212,2.179728013],
[0.001668728,2.193076605],
[0.001580741,2.206424393],
[0.001497053,2.219771416],
[0.001417472,2.233117712],
[0.001341816,2.246463318],
[0.001269908,2.259808266],
[0.001201579,2.27315259],
[0.001136668,2.286496321],
[0.001075017,2.299839489],
[0.001016478,2.313182122],
[0.000960906,2.326524248],
[0.000908165,2.339865891],
[0.000858122,2.353207077],
[0.00081065,2.36654783],
[0.000765629,2.379888171],
[0.000722942,2.393228122],
[0.000682478,2.406567703],
[0.000644131,2.419906933],
[0.000607799,2.433245831],
[0.000573384,2.446584415],
[0.000540792,2.459922701],
[0.000509936,2.473260704],
[0.000480729,2.486598441],
[0.000453091,2.499935925],
[0.000426942,2.513273169],
[0.00040221,2.526610188],
[0.000378822,2.539946992],
[0.000356712,2.553283595],
[0.000335814,2.566620006],
[0.000316068,2.579956236],
[0.000297413,2.593292296],
[0.000279794,2.606628194],
[0.000263158,2.61996394],
[0.000247453,2.633299543],
[0.000232631,2.64663501],
[0.000218646,2.659970348],
[0.000205454,2.673305566],
[0.000193013,2.68664067],
[0.000181282,2.699975667],
[0.000170225,2.713310562],
[0.000159804,2.726645362],
[0.000149987,2.739980072],
[0.000140739,2.753314697],
[0.000132031,2.766649242],
[0.000123832,2.779983713],
[0.000116116,2.793318112],
[0.000108854,2.806652445],
[0.000102023,2.819986715],
[9.55974E-05,2.833320927],
[8.95556E-05,2.846655083],
[8.38759E-05,2.859989187],
[7.85379E-05,2.873323242],
[7.35222E-05,2.886657251],
[6.88105E-05,2.899991216],
[6.43855E-05,2.913325141],
[6.02308E-05,2.926659028],
[5.63309E-05,2.93999288],
[5.26709E-05,2.953326697],
[4.92371E-05,2.966660483],
[4.60162E-05,2.97999424],
[4.29958E-05,2.993327969],
[4.0164E-05,3.006661672],
[3.75099E-05,3.01999535],
[3.50228E-05,3.033329006],
[3.26927E-05,3.04666264],
[3.05105E-05,3.059996254],
[2.84671E-05,3.073329849],
[2.65542E-05,3.086663427],
[2.47639E-05,3.099996988],
[2.30888E-05,3.113330534],
[2.15218E-05,3.126664066],
[2.00564E-05,3.139997584],
[1.86863E-05,3.153331089],
[1.74056E-05,3.166664583],
[1.62088E-05,3.179998066],
[1.50906E-05,3.193331538],
[1.40462E-05,3.206665001],
[1.3071E-05,3.219998455],
[1.21605E-05,3.2333319],
[1.13107E-05,3.246665338],
[1.05178E-05,3.259998768],
[9.78E-06,3.273332192],
[9.09E-06,3.286665609],
[8.44E-06,3.29999902],
[7.85E-06,3.313332426],
[7.29E-06,3.326665826],
[6.77E-06,3.339999222],
[6.28E-06,3.353332613],
[5.83E-06,3.366666],
[5.41E-06,3.379999384],
[5.02E-06,3.393332763],
[4.65E-06,3.40666614],
[4.31E-06,3.419999513],
[4.00E-06,3.433332883],
[3.71E-06,3.446666251],
[3.43E-06,3.459999616],
[3.18E-06,3.473332978],
[2.95E-06,3.486666339],
[2.73E-06,3.499999697],
[2.52E-06,3.513333054],
[2.34E-06,3.526666409],
[2.16E-06,3.539999762],
[2.00E-06,3.553333114],
[1.85E-06,3.566666465],
[1.71E-06,3.579999814],
[1.58E-06,3.593333162],
[1.46E-06,3.606666508],
[1.35E-06,3.619999854],
[1.25E-06,3.633333199],
[1.15E-06,3.646666543],
[1.06E-06,3.659999886],
[9.80E-07,3.673333229],
[9.04E-07,3.68666657],
[8.34E-07,3.699999911],
[7.69E-07,3.713333252],
[7.09E-07,3.726666592],
[6.54E-07,3.739999931],
[6.02E-07,3.75333327],
[5.55E-07,3.766666609],
[5.11E-07,3.779999947],
[4.71E-07,3.793333284],
[4.33E-07,3.806666622],
[3.99E-07,3.819999959],
[3.67E-07,3.833333295],
[3.38E-07,3.846666632],
[3.11E-07,3.859999968],
[2.86E-07,3.873333304],
[2.63E-07,3.88666664],
[2.41E-07,3.899999975],
[2.22E-07,3.913333311],
[2.04E-07,3.926666646],
[1.87E-07,3.939999981],
[1.72E-07,3.953333316],
[1.58E-07,3.966666651],
[1.45E-07,3.979999986],
[1.33E-07,3.99333332],
[1.22E-07,4.006666655],
[1.12E-07,4.019999989],
[1.02E-07,4.033333323],
[9.38E-08,4.046666657],
[8.59E-08,4.059999992],
[7.87E-08,4.073333326],
[7.21E-08,4.08666666],
[6.60E-08,4.099999994],
[6.04E-08,4.113333327],
[5.53E-08,4.126666661],
[5.06E-08,4.139999995],
[4.63E-08,4.153333329],
[4.23E-08,4.166666663]]
return zip(*data)
def expected_bw01_2():
# these data were generated by Mathematica
data = [
[0.618677819, -5.92E-16],
[0.615340206, 0.022650374],
[0.611974986, 0.045215103],
[0.6085824, 0.067694136],
[0.605162698, 0.090087441],
[0.601716147, 0.112394995],
[0.598243024, 0.134616796],
[0.594743623, 0.15675285],
[0.591218246, 0.178803183],
[0.587667212, 0.200767834],
[0.584090852, 0.222646854],
[0.580489509, 0.244440313],
[0.576863542, 0.266148291],
[0.57321332, 0.287770886],
[0.569539227, 0.309308208],
[0.565841661, 0.330760383],
[0.562121029, 0.35212755],
[0.558377756, 0.373409863],
[0.554612278, 0.394607489],
[0.550825041, 0.415720609],
[0.547016509, 0.436749418],
[0.543187154, 0.457694125],
[0.539337463, 0.478554953],
[0.535467935, 0.499332137],
[0.53157908, 0.520025925],
[0.527671421, 0.540636579],
[0.523745493, 0.561164375],
[0.519801842, 0.5816096],
[0.515841025, 0.601972552],
[0.511863612, 0.622253546],
[0.507870181, 0.642452904],
[0.503861324, 0.662570964],
[0.499837641, 0.682608074],
[0.495799743, 0.702564593],
[0.49174825, 0.722440892],
[0.487683792, 0.742237353],
[0.483607009, 0.76195437],
[0.479518549, 0.781592346],
[0.475419068, 0.801151695],
[0.471309231, 0.820632841],
[0.467189711, 0.840036218],
[0.463061186, 0.859362271],
[0.458924345, 0.878611453],
[0.454779881, 0.897784227],
[0.450628492, 0.916881063],
[0.446470884, 0.935902442],
[0.442307768, 0.954848854],
[0.438139859, 0.973720794],
[0.433967877, 0.992518767],
[0.429792545, 1.011243287],
[0.425614591, 1.029894873],
[0.421434745, 1.048474052],
[0.417253739, 1.066981357],
[0.41307231, 1.08541733],
[0.408891192, 1.103782516],
[0.404711124, 1.12207747],
[0.400532843, 1.14030275],
[0.396357088, 1.158458919],
[0.392184597, 1.176546547],
[0.388016107, 1.19456621],
[0.383852353, 1.212518485],
[0.37969407, 1.230403958],
[0.375541987, 1.248223215],
[0.371396835, 1.265976849],
[0.367259338, 1.283665455],
[0.363130218, 1.301289633],
[0.359010191, 1.318849985],
[0.35489997, 1.336347116],
[0.350800263, 1.353781634],
[0.346711769, 1.37115415],
[0.342635186, 1.388465276],
[0.3385712, 1.405715627],
[0.334520495, 1.42290582],
[0.330483743, 1.440036471],
[0.32646161, 1.457108201],
[0.322454755, 1.47412163],
[0.318463827, 1.491077379],
[0.314489465, 1.50797607],
[0.310532299, 1.524818324],
[0.306592951, 1.541604765],
[0.30267203, 1.558336015],
[0.298770135, 1.575012695],
[0.294887856, 1.591635427],
[0.29102577, 1.608204833],
[0.287184443, 1.624721533],
[0.283364427, 1.641186147],
[0.279566267, 1.657599292],
[0.27579049, 1.673961585],
[0.272037613, 1.690273642],
[0.268308141, 1.706536077],
[0.264602564, 1.722749501],
[0.260921359, 1.738914524],
[0.257264991, 1.755031754],
[0.25363391, 1.771101797],
[0.250028551, 1.787125254],
[0.246449338, 1.803102727],
[0.242896679, 1.819034814],
[0.239370967, 1.834922108],
[0.235872583, 1.850765201],
[0.232401892, 1.866564683],
[0.228959244, 1.882321139],
[0.225544976, 1.89803515],
[0.22215941, 1.913707295],
[0.218802853, 1.92933815],
[0.215475597, 1.944928285],
[0.212177921, 1.960478269],
[0.208910087, 1.975988664],
[0.205672344, 1.99146003],
[0.202464928, 2.006892924],
[0.199288057, 2.022287897],
[0.196141937, 2.037645495],
[0.19302676, 2.052966263],
[0.189942702, 2.068250738],
[0.186889926, 2.083499456],
[0.183868582, 2.098712945],
[0.180878804, 2.113891732],
[0.177920714, 2.129036336],
[0.17499442, 2.144147275],
[0.172100016, 2.159225058],
[0.169237584, 2.174270193],
[0.166407191, 2.189283182],
[0.163608893, 2.204264521],
[0.160842733, 2.219214703],
[0.158108741, 2.234134214],
[0.155406935, 2.249023537],
[0.152737321, 2.26388315],
[0.150099893, 2.278713524],
[0.147494634, 2.293515127],
[0.144921514, 2.308288421],
[0.142380494, 2.323033864],
[0.139871523, 2.337751907],
[0.13739454, 2.352442999],
[0.134949472, 2.36710758],
[0.132536238, 2.381746088],
[0.130154745, 2.396358955],
[0.127804893, 2.410946608],
[0.12548657, 2.425509469],
[0.123199656, 2.440047953],
[0.120944023, 2.454562474],
[0.118719533, 2.469053437],
[0.116526041, 2.483521244],
[0.114363392, 2.497966292],
[0.112231426, 2.512388971],
[0.110129973, 2.526789669],
[0.108058857, 2.541168767],
[0.106017894, 2.555526641],
[0.104006896, 2.569863663],
[0.102025664, 2.584180199],
[0.100073998, 2.598476611],
[0.098151688, 2.612753256],
[0.096258519, 2.627010486],
[0.094394273, 2.641248646],
[0.092558724, 2.655468081],
[0.090751642, 2.669669127],
[0.088972794, 2.683852116],
[0.087221939, 2.698017378],
[0.085498835, 2.712165234],
[0.083803235, 2.726296003],
[0.082134887, 2.74041],
[0.080493538, 2.754507534],
[0.07887893, 2.768588909],
[0.077290801, 2.782654426],
[0.075728889, 2.79670438],
[0.074192926, 2.810739063],
[0.072682645, 2.824758761],
[0.071197773, 2.838763758],
[0.069738039, 2.85275433],
[0.068303167, 2.866730753],
[0.066892881, 2.880693295],
[0.065506903, 2.894642223],
[0.064144953, 2.908577796],
[0.062806752, 2.922500273],
[0.061492017, 2.936409906],
[0.060200467, 2.950306944],
[0.058931819, 2.964191632],
[0.05768579, 2.978064212],
[0.056462097, 2.991924919],
[0.055260455, 3.005773987],
[0.054080581, 3.019611646],
[0.052922192, 3.03343812],
[0.051785005, 3.047253633],
[0.050668736, 3.061058401],
[0.049573104, 3.074852639],
[0.048497826, 3.088636558],
[0.047442623, 3.102410365],
[0.046407213, 3.116174264],
[0.045391318, 3.129928455],
[0.04439466, 3.143673134],
[0.043416962, 3.157408495],
[0.042457949, 3.171134727],
[0.041517347, 3.184852018],
[0.040594882, 3.198560551],
[0.039690285, 3.212260506],
[0.038803285, 3.225952059],
[0.037933615, 3.239635385],
[0.037081009, 3.253310654],
[0.036245203, 3.266978035],
[0.035425935, 3.28063769],
[0.034622944, 3.294289783],
[0.033835973, 3.307934471],
[0.033064765, 3.321571911],
[0.032309066, 3.335202256],
[0.031568626, 3.348825655],
[0.030843193, 3.362442257],
[0.030132523, 3.376052204],
[0.029436368, 3.389655641],
[0.028754488, 3.403252706],
[0.028086641, 3.416843535],
[0.027432591, 3.430428262],
[0.026792103, 3.44400702],
[0.026164943, 3.457579938],
[0.025550881, 3.471147141],
[0.02494969, 3.484708755],
[0.024361146, 3.498264901],
[0.023785025, 3.511815698],
[0.023221108, 3.525361264],
[0.022669177, 3.538901713],
[0.022129018, 3.552437159],
[0.021600419, 3.565967712],
[0.02108317, 3.57949348],
[0.020577066, 3.593014569],
[0.020081901, 3.606531083],
[0.019597475, 3.620043125],
[0.019123589, 3.633550795],
[0.018660047, 3.64705419],
[0.018206655, 3.660553406],
[0.017763223, 3.674048538],
[0.017329562, 3.687539678],
[0.016905487, 3.701026917],
[0.016490815, 3.714510342],
[0.016085367, 3.727990041],
[0.015688964, 3.741466098],
[0.015301431, 3.754938598],
[0.014922596, 3.768407621],
[0.014552289, 3.781873247],
[0.014190343, 3.795335555],
[0.013836593, 3.808794621],
[0.013490877, 3.822250521],
[0.013153035, 3.835703327],
[0.012822911, 3.849153111],
[0.012500348, 3.862599945],
[0.012185196, 3.876043897],
[0.011877305, 3.889485034],
[0.011576526, 3.902923423],
[0.011282716, 3.916359127],
[0.010995731, 3.929792212],
[0.010715431, 3.943222738],
[0.010441679, 3.956650766],
[0.010174339, 3.970076355],
[0.009913277, 3.983499564],
[0.009658364, 3.996920449],
[0.009409469, 4.010339066],
[0.009166466, 4.023755469],
[0.008929232, 4.037169711],
[0.008697644, 4.050581845],
[0.008471582, 4.063991921],
[0.008250929, 4.07739999],
[0.008035568, 4.090806099],
[0.007825385, 4.104210296],
[0.00762027, 4.117612628],
[0.007420113, 4.131013141],
[0.007224806, 4.144411878],
[0.007034243, 4.157808885],
[0.006848321, 4.171204202],
[0.006666939, 4.184597871],
[0.006489995, 4.197989934],
[0.006317393, 4.21138043],
[0.006149037, 4.224769398],
[0.005984831, 4.238156875],
[0.005824684, 4.2515429],
[0.005668505, 4.264927508],
[0.005516204, 4.278310734],
[0.005367696, 4.291692614],
[0.005222893, 4.30507318],
[0.005081713, 4.318452467],
[0.004944073, 4.331830506],
[0.004809892, 4.345207329],
[0.004679091, 4.358582967],
[0.004551594, 4.37195745],
[0.004427324, 4.385330807],
[0.004306206, 4.398703067],
[0.004188169, 4.412074258],
[0.00407314, 4.425444408],
[0.00396105, 4.438813542],
[0.003851829, 4.452181688],
[0.003745412, 4.465548871],
[0.003641732, 4.478915115],
[0.003540726, 4.492280445],
[0.003442329, 4.505644884],
[0.00334648, 4.519008457],
[0.003253119, 4.532371184],
[0.003162187, 4.54573309],
[0.003073626, 4.559094194],
[0.002987378, 4.572454518],
[0.00290339, 4.585814082],
[0.002821606, 4.599172907],
[0.002741973, 4.612531011],
[0.002664439, 4.625888415],
[0.002588954, 4.639245136],
[0.002515467, 4.652601193],
[0.00244393, 4.665956603]]
    return zip(*data)


def expected_bw01_8():
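    """Expected reference values; returns the two data columns via ``zip(*data)``."""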
# these data were generated by Mathematica
data = [
[0.791313196, -1.78E-15],
[0.790759531, 0.028210217],
[0.79019862, 0.056398277],
[0.789630387, 0.084563925],
[0.789054754, 0.112706906],
[0.788471644, 0.140826962],
[0.787880977, 0.168923834],
[0.787282675, 0.19699726],
[0.786676658, 0.225046978],
[0.786062847, 0.253072723],
[0.785441161, 0.28107423],
[0.78481152, 0.309051229],
[0.784173841, 0.337003452],
[0.783528043, 0.364930627],
[0.782874045, 0.392832482],
[0.782211762, 0.420708743],
[0.781541113, 0.448559132],
[0.780862014, 0.476383374],
[0.780174381, 0.504181188],
[0.779478129, 0.531952294],
[0.778773174, 0.55969641],
[0.778059431, 0.587413253],
[0.777336815, 0.615102538],
[0.776605239, 0.642763978],
[0.775864617, 0.670397285],
[0.775114863, 0.698002171],
[0.77435589, 0.725578344],
[0.773587611, 0.753125514],
[0.772809939, 0.780643387],
[0.772022785, 0.808131668],
[0.771226063, 0.835590063],
[0.770419682, 0.863018274],
[0.769603556, 0.890416004],
[0.768777595, 0.917782953],
[0.76794171, 0.945118822],
[0.767095812, 0.97242331],
[0.766239812, 0.999696115],
[0.765373621, 1.026936933],
[0.764497148, 1.05414546],
[0.763610304, 1.081321392],
[0.762712999, 1.108464423],
[0.761805143, 1.135574247],
[0.760886646, 1.162650556],
[0.759957417, 1.189693042],
[0.759017368, 1.216701397],
[0.758066407, 1.243675311],
[0.757104445, 1.270614474],
[0.756131391, 1.297518577],
[0.755147156, 1.324387308],
[0.754151649, 1.351220355],
[0.75314478, 1.378017409],
[0.75212646, 1.404778155],
[0.751096599, 1.431502283],
[0.750055108, 1.458189479],
[0.749001898, 1.484839431],
[0.74793688, 1.511451826],
[0.746859964, 1.538026352],
[0.745771063, 1.564562695],
[0.744670088, 1.591060543],
[0.743556952, 1.617519582],
[0.742431567, 1.643939502],
[0.741293847, 1.670319989],
[0.740143704, 1.696660731],
[0.738981053, 1.722961418],
[0.737805808, 1.749221737],
[0.736617883, 1.775441378],
[0.735417196, 1.801620032],
[0.734203661, 1.827757388],
[0.732977195, 1.853853138],
[0.731737716, 1.879906973],
[0.730485142, 1.905918587],
[0.729219392, 1.931887674],
[0.727940385, 1.957813927],
[0.726648042, 1.983697043],
[0.725342283, 2.009536719],
[0.724023032, 2.035332651],
[0.722690211, 2.06108454],
[0.721343744, 2.086792086],
[0.719983557, 2.112454991],
[0.718609575, 2.138072958],
[0.717221726, 2.163645692],
[0.715819937, 2.189172899],
[0.714404139, 2.214654288],
[0.712974262, 2.240089568],
[0.711530239, 2.265478451],
[0.710072001, 2.290820651],
[0.708599484, 2.316115883],
[0.707112624, 2.341363865],
[0.705611358, 2.366564317],
[0.704095625, 2.39171696],
[0.702565366, 2.416821519],
[0.701020522, 2.44187772],
[0.699461037, 2.466885293],
[0.697886856, 2.491843969],
[0.696297926, 2.516753481],
[0.694694195, 2.541613568],
[0.693075615, 2.566423967],
[0.691442137, 2.591184422],
[0.689793715, 2.615894677],
[0.688130307, 2.640554481],
[0.686451869, 2.665163585],
[0.684758362, 2.689721743],
[0.683049748, 2.714228712],
[0.681325991, 2.738684253],
[0.679587059, 2.763088131],
[0.677832919, 2.787440111],
[0.676063542, 2.811739966],
[0.674278903, 2.83598747],
[0.672478976, 2.8601824],
[0.67066374, 2.884324538],
[0.668833176, 2.90841367],
[0.666987266, 2.932449585],
[0.665125996, 2.956432075],
[0.663249354, 2.980360939],
[0.661357331, 3.004235976],
[0.659449921, 3.028056993],
[0.65752712, 3.051823798],
[0.655588927, 3.075536204],
[0.653635345, 3.099194031],
[0.651666377, 3.122797099],
[0.649682031, 3.146345235],
[0.647682319, 3.169838269],
[0.645667254, 3.193276038],
[0.643636853, 3.216658382],
[0.641591135, 3.239985143],
[0.639530124, 3.263256173],
[0.637453846, 3.286471325],
[0.63536233, 3.309630457],
[0.633255608, 3.332733433],
[0.631133717, 3.355780121],
[0.628996695, 3.378770395],
[0.626844586, 3.401704132],
[0.624677435, 3.424581216],
[0.622495291, 3.447401535],
[0.620298208, 3.470164983],
[0.618086241, 3.492871456],
[0.615859452, 3.51552086],
[0.613617902, 3.538113102],
[0.611361658, 3.560648097],
[0.609090793, 3.583125764],
[0.606805379, 3.605546026],
[0.604505494, 3.627908814],
[0.60219122, 3.650214063],
[0.599862642, 3.672461712],
[0.597519848, 3.694651708],
[0.595162933, 3.716784001],
[0.592791991, 3.738858548],
[0.590407122, 3.760875311],
[0.588008432, 3.782834256],
[0.585596026, 3.804735357],
[0.583170017, 3.826578592],
[0.58073052, 3.848363943],
[0.578277654, 3.870091401],
[0.57581154, 3.891760959],
[0.573332307, 3.913372617],
[0.570840084, 3.934926382],
[0.568335005, 3.956422262],
[0.565817208, 3.977860275],
[0.563286835, 3.999240443],
[0.560744032, 4.020562792],
[0.558188946, 4.041827355],
[0.555621732, 4.063034169],
[0.553042545, 4.084183279],
[0.550451546, 4.105274732],
[0.5478489, 4.126308584],
[0.545234773, 4.147284892],
[0.542609337, 4.168203722],
[0.539972767, 4.189065144],
[0.53732524, 4.209869233],
[0.534666941, 4.230616069],
[0.531998052, 4.251305738],
[0.529318765, 4.271938331],
[0.52662927, 4.292513944],
[0.523929764, 4.313032677],
[0.521220446, 4.333494637],
[0.518501519, 4.353899935],
[0.515773187, 4.374248688],
[0.51303566, 4.394541015],
[0.510289151, 4.414777044],
[0.507533873, 4.434956904],
[0.504770046, 4.455080732],
[0.501997891, 4.475148668],
[0.499217631, 4.495160857],
[0.496429493, 4.515117449],
[0.493633708, 4.535018597],
[0.490830508, 4.554864462],
[0.488020128, 4.574655205],
[0.485202806, 4.594390996],
[0.482378781, 4.614072006],
[0.479548298, 4.633698412],
[0.476711601, 4.653270395],
[0.473868937, 4.672788139],
[0.471020555, 4.692251834],
[0.468166709, 4.711661672],
[0.465307651, 4.731017852],
[0.462443637, 4.750320574],
[0.459574925, 4.769570044],
[0.456701774, 4.788766469],
[0.453824445, 4.807910063],
[0.450943202, 4.827001043],
[0.448058307, 4.846039627],
[0.445170028, 4.865026039],
[0.442278631, 4.883960506],
[0.439384384, 4.902843259],
[0.436487556, 4.921674531],
[0.433588419, 4.940454558],
[0.430687243, 4.959183581],
[0.4277843, 4.977861843],
[0.424879864, 4.99648959],
[0.421974209, 5.015067071],
[0.419067607, 5.033594538],
[0.416160335, 5.052072246],
[0.413252666, 5.070500452],
[0.410344877, 5.088879417],
[0.407437242, 5.107209403],
[0.404530037, 5.125490675],
[0.401623537, 5.143723501],
[0.398718018, 5.161908152],
[0.395813754, 5.180044898],
[0.392911019, 5.198134016],
[0.390010089, 5.216175781],
[0.387111235, 5.234170473],
[0.384214732, 5.252118371],
[0.381320851, 5.270019759],
[0.378429863, 5.287874921],
[0.375542038, 5.305684143],
[0.372657646, 5.323447712],
[0.369776955, 5.341165919],
[0.366900231, 5.358839054],
[0.364027739, 5.376467409],
[0.361159743, 5.394051279],
[0.358296507, 5.411590959],
[0.355438289, 5.429086744],
[0.35258535, 5.446538934],
[0.349737946, 5.463947825],
[0.346896333, 5.481313718],
[0.344060763, 5.498636914],
[0.341231488, 5.515917714],
[0.338408757, 5.53315642],
[0.335592816, 5.550353336],
[0.33278391, 5.567508766],
[0.329982281, 5.584623013],
[0.327188167, 5.601696384],
[0.324401806, 5.618729183],
[0.321623432, 5.635721717],
[0.318853277, 5.652674292],
[0.316091568, 5.669587214],
[0.313338533, 5.686460791],
[0.310594393, 5.703295329],
[0.307859369, 5.720091137],
[0.305133678, 5.73684852],
[0.302417532, 5.753567787],
[0.299711144, 5.770249245],
[0.29701472, 5.786893201],
[0.294328464, 5.803499962],
[0.291652577, 5.820069835],
[0.288987256, 5.836603126],
[0.286332696, 5.853100143],
[0.283689086, 5.86956119],
[0.281056614, 5.885986573],
[0.278435462, 5.902376597],
[0.275825812, 5.918731567],
[0.273227839, 5.935051787],
[0.270641715, 5.95133756],
[0.268067609, 5.967589188],
[0.265505687, 5.983806974],
[0.262956109, 5.99999122],
[0.260419034, 6.016142225],
[0.257894614, 6.032260289],
[0.255383001, 6.048345711],
[0.252884339, 6.064398789],
[0.250398773, 6.080419819],
[0.247926438, 6.096409099],
[0.245467472, 6.112366922],
[0.243022003, 6.128293583],
[0.240590159, 6.144189375],
[0.238172063, 6.160054588],
[0.235767834, 6.175889515],
[0.233377587, 6.191694443],
[0.231001433, 6.207469661],
[0.228639479, 6.223215456],
[0.22629183, 6.238932114],
[0.223958584, 6.254619919],
[0.221639837, 6.270279153],
[0.219335682, 6.285910099],
[0.217046206, 6.301513036],
[0.214771493, 6.317088243],
[0.212511624, 6.332635998],
[0.210266676, 6.348156577],
[0.208036722, 6.363650253],
[0.20582183, 6.3791173],
[0.203622066, 6.394557989],
[0.201437492, 6.409972589],
[0.199268165, 6.42536137],
[0.197114141, 6.440724597],
[0.19497547, 6.456062535],
[0.1928522, 6.471375449],
[0.190744373, 6.4866636],
[0.18865203, 6.501927247],
[0.186575209, 6.517166649],
[0.184513941, 6.532382064],
[0.182468257, 6.547573746],
[0.180438184, 6.562741948],
[0.178423744, 6.577886922],
[0.176424957, 6.593008919],
[0.174441841, 6.608108185],
[0.172474408, 6.623184968],
[0.170522668, 6.638239512],
[0.168586629, 6.653272061],
[0.166666295, 6.668282854],
[0.164761666, 6.683272132],
[0.162872741, 6.698240131],
[0.160999515, 6.713187089],
[0.15914198, 6.728113238],
[0.157300126, 6.743018811],
[0.155473938, 6.757904037],
[0.1536634, 6.772769146],
[0.151868495, 6.787614365],
[0.150089199, 6.802439917],
[0.14832549, 6.817246026],
[0.146577339, 6.832032914],
[0.144844719, 6.846800799],
[0.143127597, 6.861549898],
[0.141425939, 6.876280429],
[0.139739708, 6.890992604],
[0.138068867, 6.905686636],
[0.136413374, 6.920362735],
[0.134773185, 6.935021109],
[0.133148256, 6.949661964],
[0.131538539, 6.964285506],
[0.129943984, 6.978891937],
[0.12836454, 6.993481459],
[0.126800154, 7.008054271],
[0.12525077, 7.02261057],
[0.12371633, 7.037150552],
[0.122196777, 7.05167441],
[0.120692049, 7.066182337],
[0.119202083, 7.080674523],
[0.117726816, 7.095151157],
[0.116266181, 7.109612425],
[0.114820113, 7.124058512],
[0.113388541, 7.138489602],
[0.111971395, 7.152905875],
[0.110568604, 7.167307511],
[0.109180095, 7.181694689],
[0.107805794, 7.196067584],
[0.106445624, 7.21042637],
[0.10509951, 7.224771221],
[0.103767372, 7.239102308],
[0.102449132, 7.253419799],
[0.101144709, 7.267723862],
[0.099854023, 7.282014663],
[0.098576991, 7.296292366],
[0.097313529, 7.310557134],
[0.096063553, 7.324809127],
[0.094826978, 7.339048505],
[0.093603719, 7.353275424],
[0.092393688, 7.367490042],
[0.091196798, 7.381692512],
[0.09001296, 7.395882987],
[0.088842086, 7.410061617],
[0.087684087, 7.424228553],
[0.086538871, 7.438383941],
[0.085406348, 7.452527929],
[0.084286427, 7.466660661],
[0.083179017, 7.480782279],
[0.082084023, 7.494892927],
[0.081001355, 7.508992742],
[0.079930919, 7.523081865],
[0.078872621, 7.537160431],
[0.077826367, 7.551228577],
[0.076792064, 7.565286437],
[0.075769617, 7.579334142],
[0.074758931, 7.593371824],
[0.073759911, 7.607399612],
[0.072772463, 7.621417635],
[0.071796491, 7.635426019],
[0.070831899, 7.64942489],
[0.069878593, 7.663414371],
[0.068936476, 7.677394584],
[0.068005454, 7.691365651],
[0.06708543, 7.705327691],
[0.066176309, 7.719280822],
[0.065277995, 7.733225162],
[0.064390393, 7.747160826],
[0.063513407, 7.761087928],
[0.062646943, 7.775006581],
[0.061790903, 7.788916897],
[0.060945195, 7.802818985],
[0.060109721, 7.816712956],
[0.059284389, 7.830598915],
[0.058469102, 7.844476971],
[0.057663767, 7.858347228],
[0.05686829, 7.87220979],
[0.056082576, 7.88606476],
[0.055306533, 7.899912239],
[0.054540065, 7.913752328],
[0.053783082, 7.927585125],
[0.053035488, 7.941410728],
[0.052297194, 7.955229235],
[0.051568105, 7.96904074],
[0.050848131, 7.982845338],
[0.05013718, 7.996643123],
[0.049435161, 8.010434186],
[0.048741984, 8.024218618],
[0.048057559, 8.03799651],
[0.047381795, 8.051767949],
[0.046714603, 8.065533024],
[0.046055895, 8.079291821],
[0.045405583, 8.093044426],
[0.044763578, 8.106790924],
[0.044129792, 8.120531397],
[0.043504139, 8.134265928],
[0.042886533, 8.147994598],
[0.042276887, 8.161717488],
[0.041675117, 8.175434678],
[0.041081136, 8.189146245],
[0.040494861, 8.202852266],
[0.039916208, 8.21655282],
[0.039345093, 8.230247979],
[0.038781434, 8.243937821],
[0.038225148, 8.257622416],
[0.037676154, 8.27130184],
[0.037134371, 8.284976162],
[0.036599718, 8.298645455],
[0.036072115, 8.312309787],
[0.035551482, 8.325969228],
[0.035037742, 8.339623846],
[0.034530815, 8.353273708],
[0.034030625, 8.36691888],
[0.033537093, 8.380559429],
[0.033050144, 8.394195418],
[0.032569702, 8.407826913],
[0.032095691, 8.421453974],
[0.031628037, 8.435076666],
[0.031166666, 8.448695049],
[0.030711505, 8.462309183],
[0.030262479, 8.47591913],
[0.029819519, 8.489524947],
[0.029382551, 8.503126693],
[0.028951504, 8.516724426],
[0.02852631, 8.530318201],
[0.028106896, 8.543908077],
[0.027693196, 8.557494106],
[0.027285139, 8.571076345],
[0.026882658, 8.584654847],
[0.026485686, 8.598229665],
[0.026094156, 8.611800851],
[0.025708002, 8.625368457],
[0.025327158, 8.638932535],
[0.024951561, 8.652493134],
[0.024581144, 8.666050305],
[0.024215845, 8.679604096],
[0.023855601, 8.693154556],
[0.023500349, 8.706701732],
[0.023150028, 8.720245672],
[0.022804576, 8.733786421],
[0.022463933, 8.747324027],
[0.022128038, 8.760858533],
[0.021796832, 8.774389986],
[0.021470257, 8.787918428],
[0.021148254, 8.801443903],
[0.020830765, 8.814966454],
[0.020517734, 8.828486123],
[0.020209104, 8.842002953],
[0.019904819, 8.855516983],
[0.019604824, 8.869028255],
[0.019309063, 8.882536809],
[0.019017484, 8.896042684],
[0.018730032, 8.909545918],
[0.018446654, 8.923046552],
[0.018167298, 8.936544621],
[0.017891912, 8.950040165],
[0.017620445, 8.963533218],
[0.017352845, 8.977023819],
[0.017089063, 8.990512003],
[0.016829049, 9.003997804],
[0.016572754, 9.017481259],
[0.016320128, 9.0309624],
[0.016071125, 9.044441264],
[0.015825697, 9.057917881],
[0.015583796, 9.071392287],
[0.015345377, 9.084864513],
[0.015110392, 9.098334591],
[0.014878797, 9.111802553],
[0.014650548, 9.12526843],
[0.014425598, 9.138732252],
[0.014203906, 9.152194052],
[0.013985426, 9.165653857],
[0.013770117, 9.179111698],
[0.013557936, 9.192567603],
[0.01334884, 9.206021602],
[0.01314279, 9.219473723],
[0.012939743, 9.232923993],
[0.012739659, 9.24637244],
[0.012542499, 9.259819092],
[0.012348222, 9.273263974],
[0.01215679, 9.286707113],
[0.011968164, 9.300148536],
[0.011782307, 9.313588267],
[0.011599179, 9.327026332],
[0.011418745, 9.340462756],
[0.011240968, 9.353897564],
[0.011065811, 9.367330779],
[0.010893237, 9.380762426],
[0.010723213, 9.394192528],
[0.010555703, 9.407621108],
[0.010390672, 9.421048189],
[0.010228086, 9.434473794],
[0.010067911, 9.447897944],
[0.009910115, 9.461320662],
[0.009754664, 9.47474197],
[0.009601525, 9.488161887],
[0.009450667, 9.501580437],
[0.009302058, 9.514997638],
[0.009155667, 9.528413512],
[0.009011462, 9.541828078],
[0.008869413, 9.555241357],
[0.008729491, 9.568653368],
[0.008591664, 9.58206413],
[0.008455905, 9.595473661],
[0.008322184, 9.608881982],
[0.008190472, 9.622289109],
[0.008060741, 9.635695062],
[0.007932963, 9.649099858],
[0.00780711, 9.662503515],
[0.007683156, 9.67590605],
[0.007561073, 9.68930748],
[0.007440836, 9.702707822],
[0.007322417, 9.716107093],
[0.007205791, 9.729505309],
[0.007090932, 9.742902487],
[0.006977816, 9.756298641],
[0.006866417, 9.769693788],
[0.006756712, 9.783087944],
[0.006648675, 9.796481123],
[0.006542283, 9.80987334],
[0.006437512, 9.823264611],
[0.00633434, 9.83665495],
[0.006232742, 9.850044371],
[0.006132697, 9.863432888],
[0.006034182, 9.876820515],
[0.005937176, 9.890207267],
[0.005841655, 9.903593156],
[0.0057476, 9.916978195],
[0.005654988, 9.930362399],
[0.005563799, 9.94374578],
[0.005474012, 9.95712835],
[0.005385607, 9.970510123],
[0.005298564, 9.98389111],
[0.005212863, 9.997271324],
[0.005128484, 10.01065078],
[0.005045408, 10.02402948],
[0.004963616, 10.03740745],
[0.00488309, 10.05078469],
[0.004803811, 10.06416121],
[0.00472576, 10.07753703],
[0.00464892, 10.09091216],
[0.004573272, 10.10428661],
[0.0044988, 10.11766038],
[0.004425486, 10.1310335],
[0.004353313, 10.14440596],
[0.004282265, 10.15777778],
[0.004212323, 10.17114898],
[0.004143474, 10.18451955],
[0.004075699, 10.19788951],
[0.004008983, 10.21125887],
[0.003943311, 10.22462764],
[0.003878668, 10.23799583],
[0.003815037, 10.25136345],
[0.003752403, 10.2647305],
[0.003690753, 10.27809699],
[0.003630071, 10.29146294],
[0.003570343, 10.30482835],
[0.003511554, 10.31819324],
[0.003453692, 10.3315576],
[0.003396741, 10.34492145],
[0.003340688, 10.35828479],
[0.00328552, 10.37164764],
[0.003231223, 10.38501],
[0.003177785, 10.39837188],
[0.003125192, 10.41173328],
[0.003073432, 10.42509422],
[0.003022493, 10.4384547],
[0.002972361, 10.45181473],
[0.002923025, 10.46517432],
[0.002874473, 10.47853347],
[0.002826693, 10.49189218],
[0.002779672, 10.50525048],
[0.002733401, 10.51860836],
[0.002687867, 10.53196583],
[0.002643059, 10.5453229],
[0.002598967, 10.55867957],
[0.002555579, 10.57203585],
[0.002512884, 10.58539175],
[0.002470873, 10.59874727],
[0.002429534, 10.61210242],
[0.002388858, 10.6254572],
[0.002348834, 10.63881162],
[0.002309452, 10.65216569],
[0.002270703, 10.66551941],
[0.002232577, 10.67887279],
[0.002195064, 10.69222583],
[0.002158155, 10.70557854],
[0.002121841, 10.71893092],
[0.002086112, 10.73228298],
[0.00205096, 10.74563473],
[0.002016376, 10.75898616],
[0.00198235, 10.77233729],
[0.001948875, 10.78568812],
[0.001915942, 10.79903865],
[0.001883542, 10.81238889],
[0.001851668, 10.82573885],
[0.00182031, 10.83908852],
[0.001789462, 10.85243792],
[0.001759115, 10.86578704],
[0.001729262, 10.87913589],
[0.001699894, 10.89248448],
[0.001671005, 10.90583282],
[0.001642587, 10.91918089],
[0.001614632, 10.93252872],
[0.001587133, 10.9458763],
[0.001560084, 10.95922363],
[0.001533477, 10.97257073],
[0.001507305, 10.98591759],
[0.001481562, 10.99926422],
[0.00145624, 11.01261062],
[0.001431334, 11.0259568],
[0.001406837, 11.03930276],
[0.001382741, 11.0526485],
[0.001359042, 11.06599403],
[0.001335733, 11.07933935],
[0.001312807, 11.09268447],
[0.001290259, 11.10602938],
[0.001268083, 11.11937409],
[0.001246272, 11.13271861],
[0.001224822, 11.14606294],
[0.001203726, 11.15940707],
[0.001182979, 11.17275102],
[0.001162575, 11.18609479],
[0.001142509, 11.19943837],
[0.001122775, 11.21278178],
[0.001103369, 11.22612501],
[0.001084285, 11.23946808],
[0.001065517, 11.25281097],
[0.001047062, 11.2661537],
[0.001028914, 11.27949627],
[0.001011067, 11.29283867],
[0.000993518, 11.30618092],
[0.000976262, 11.31952301],
[0.000959293, 11.33286496],
[0.000942608, 11.34620675],
[0.000926202, 11.35954839],
[0.000910069, 11.37288989],
[0.000894207, 11.38623125],
[0.00087861, 11.39957246],
[0.000863275, 11.41291354],
[0.000848197, 11.42625449],
[0.000833372, 11.4395953],
[0.000818796, 11.45293598],
[0.000804464, 11.46627653],
[0.000790374, 11.47961696],
[0.000776521, 11.49295726],
[0.000762902, 11.50629744],
[0.000749512, 11.51963749],
[0.000736347, 11.53297743],
[0.000723405, 11.54631726],
[0.000710682, 11.55965697],
[0.000698174, 11.57299657],
[0.000685877, 11.58633605],
[0.000673789, 11.59967543],
[0.000661905, 11.6130147],
[0.000650223, 11.62635387],
[0.000638739, 11.63969294],
[0.00062745, 11.6530319],
[0.000616353, 11.66637076],
[0.000605444, 11.67970953],
[0.000594722, 11.6930482],
[0.000584181, 11.70638677],
[0.000573821, 11.71972525],
[0.000563637, 11.73306364],
[0.000553627, 11.74640195],
[0.000543788, 11.75974016],
[0.000534117, 11.77307828],
[0.000524612, 11.78641632],
[0.000515269, 11.79975428],
[0.000506086, 11.81309215],
[0.000497061, 11.82642995],
[0.00048819, 11.83976766],
[0.000479472, 11.8531053],
[0.000470903, 11.86644285],
[0.000462482, 11.87978034],
[0.000454206, 11.89311775],
[0.000446072, 11.90645508],
[0.000438079, 11.91979235],
[0.000430223, 11.93312954],
[0.000422502, 11.94646666],
[0.000414915, 11.95980372],
[0.00040746, 11.97314071],
[0.000400133, 11.98647763],
[0.000392933, 11.99981449]]
    return zip(*data)


def expected_wli01_100():
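    """Expected reference values; returns the two data columns via ``zip(*data)``."""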
# these data were generated by Mathematica
data = [
[0.126156841, 0.00E+00],
[0.136887573, 1.461464526],
[0.1478326, 2.934599556],
[0.158953789, 4.419808031],
[0.170215287, 5.917466654],
[0.181583756, 7.42792666],
[0.193028489, 8.951514805],
[0.204521446, 10.48853451],
[0.216037226, 12.0392671],
[0.227552986, 13.60397314],
[0.23904832, 15.18289372],
[0.250505123, 16.77625177],
[0.261907427, 18.38425336],
[0.273241246, 20.00708895],
[0.284494403, 21.64493451],
[0.295656373, 23.29795271],
[0.306718128, 24.96629391],
[0.317671982, 26.65009713],
[0.328511462, 28.34949103],
[0.339231173, 30.06459466],
[0.349826683, 31.79551832],
[0.360294411, 33.54236419],
[0.370631536, 35.30522706],
[0.380835902, 37.08419489],
[0.39090594, 38.87934936],
[0.400840599, 40.69076641],
[0.410639277, 42.51851666],
[0.420301768, 44.36266585],
[0.429828206, 46.22327523],
[0.439219027, 48.10040188],
[0.44847492, 49.99409907],
[0.457596797, 51.90441656],
[0.466585761, 53.8314008],
[0.475443075, 55.77509525],
[0.48417014, 57.73554056],
[0.49276847, 59.7127748],
[0.501239677, 61.70683362],
[0.509585452, 63.71775045],
[0.517807547, 65.74555662],
[0.525907769, 67.79028156],
[0.53388796, 69.85195287],
[0.541749996, 71.93059648],
[0.549495769, 74.02623674],
[0.557127186, 76.13889655],
[0.56464616, 78.26859742],
[0.572054604, 80.41535958],
[0.579354426, 82.57920203],
[0.586547526, 84.76014267],
[0.593635789, 86.95819829],
[0.600621084, 89.17338469],
[0.607505264, 91.40571673],
[0.614290158, 93.65520835],
[0.620977571, 95.92187265],
[0.627569285, 98.20572191],
[0.634067056, 100.5067677],
[0.640472611, 102.8250207],
[0.646787649, 105.1604912],
[0.65301384, 107.5131884],
[0.659152826, 109.8831213],
[0.665206216, 112.2702981],
[0.671175591, 114.6747263],
[0.677062501, 117.096413],
[0.682868466, 119.5353648],
[0.688594973, 121.9915877],
[0.694243483, 124.4650871],
[0.699815424, 126.9558681],
[0.705312194, 129.4639352],
[0.710735161, 131.9892925],
[0.716085667, 134.5319437],
[0.72136502, 137.0918919],
[0.726574503, 139.6691399],
[0.731715369, 142.26369],
[0.736788844, 144.8755442],
[0.741796126, 147.504704],
[0.746738386, 150.1511704],
[0.751616768, 152.8149442],
[0.75643239, 155.4960256],
[0.761186345, 158.1944146],
[0.765879701, 160.9101105],
[0.770513498, 163.6431125],
[0.775088754, 166.3934191],
[0.779606464, 169.1610287],
[0.784067597, 171.945939],
[0.788473099, 174.7481474],
[0.792823894, 177.5676508],
[0.797120883, 180.4044459],
[0.801364946, 183.2585284],
[0.805556938, 186.1298942],
[0.809697696, 189.0185382],
[0.813788033, 191.924455],
[0.817828743, 194.8476388],
[0.821820598, 197.788083],
[0.82576435, 200.7457807],
[0.829660732, 203.7207243],
[0.833510454, 206.7129055],
[0.837314208, 209.7223157],
[0.841072666, 212.7489454],
[0.844786481, 215.7927843],
[0.848456285, 218.8538218],
[0.852082691, 221.9320461],
[0.855666293, 225.0274448],
[0.859207663, 228.1400048],
[0.862707357, 231.2697119],
[0.866165909, 234.416551],
[0.869583832, 237.5805061],
[0.872961621, 240.76156],
[0.876299749, 243.9596946],
[0.879598668, 247.1748905],
[0.88285881, 250.4071269],
[0.886080583, 253.6563819],
[0.889264375, 256.922632],
[0.89241055, 260.2058521],
[0.895519447, 263.5060156],
[0.898591382, 266.823094],
[0.901626644, 270.1570571],
[0.904625496, 273.5078725],
[0.907588173, 276.8755056],
[0.91051488, 280.2599195],
[0.913405793, 283.6610746],
[0.916261052, 287.078929],
[0.919080764, 290.5134372],
[0.921865001, 293.964551],
[0.924613794, 297.4322186],
[0.927327131, 300.9163843],
[0.93000496, 304.4169885],
[0.932647175, 307.9339671],
[0.935253625, 311.4672512],
[0.937824099, 315.0167669],
[0.94035833, 318.5824344],
[0.942855985, 322.1641678],
[0.945316665, 325.7618746],
[0.947739895, 329.375455],
[0.950125122, 333.0048012],
[0.952471706, 336.6497969],
[0.954778916, 340.3103162],
[0.957045922, 343.9862232],
[0.959271792, 347.6773708],
[0.961455478, 351.3835997],
[0.963595817, 355.1047378],
[0.965691518, 358.8405985],
[0.967741162, 362.5909798],
[0.969743194, 366.3556633],
[0.971695921, 370.1344126],
[0.97359751, 373.9269719],
[0.975445989, 377.7330652],
[0.977239256, 381.5523943],
[0.978975084, 385.3846381],
[0.980651136, 389.229451],
[0.982264991, 393.0864621],
[0.983814169, 396.9552743],
[0.985296172, 400.8354635],
[0.986708527, 404.7265791],
[0.988048849, 408.6281432],
[0.989314907, 412.5396525],
[0.990504697, 416.4605789],
[0.991616533, 420.3903721],
[0.992649133, 424.3284626],
[0.993601714, 428.2742651],
[0.994474073, 432.2271834],
[0.995266671, 436.1866158],
[0.995980691, 440.1519606],
[0.996618075, 444.1226232],
[0.997181534, 448.0980225],
[0.997674522, 452.0775976],
[0.998101177, 456.0608141],
[0.998466224, 460.0471699],
[0.998774852, 464.0362003],
[0.999032566, 468.0274812],
[0.999245027, 472.0206317],
[0.999417892, 476.015315],
[0.999556659, 480.0112383],
[0.999666537, 484.0081511],
[0.999752338, 488.0058425],
[0.999818401, 492.004138],
[0.99986855, 496.0028957],
[0.99990608, 500.0020018],
[0.999933766, 504.001367],
[0.999953901, 508.0009221],
[0.999968335, 512.0006143],
[0.999978535, 516.0004042],
[0.99998564, 520.0002627],
[0.99999052, 524.0001686],
[0.999993824, 528.0001068],
[0.99999603, 532.0000669],
[0.999997481, 536.0000413],
[0.999998423, 540.0000252],
[0.999999026, 544.0000152],
[0.999999406, 548.000009],
[0.999999643, 552.0000053],
[0.999999788, 556.0000031],
[0.999999876, 560.0000018],
[0.999999928, 564.000001],
[0.999999959, 568.0000006],
[0.999999977, 572.0000003],
[0.999999987, 576.0000002],
[0.999999993, 580.0000001],
[0.999999996, 584],
[0.999999998, 588],
[0.999999999, 592],
[0.999999999, 596],
[1, 600],
[1, 604],
[1, 608],
[1, 612],
[1, 616],
[1, 620],
[1, 624],
[1, 628],
[1, 632],
[1, 636],
[1, 640],
[1, 644],
[1, 648],
[1, 652],
[1, 656],
[1, 660],
[1, 664],
[1, 668],
[1, 672],
[1, 676],
[1, 680],
[1, 684],
[1, 688],
[1, 692],
[1, 696],
[1, 700],
[1, 704],
[1, 708],
[1, 712],
[1, 716],
[1, 720],
[1, 724],
[1, 728],
[1, 732],
[1, 736],
[1, 740],
[1, 744],
[1, 748],
[1, 752],
[1, 756],
[1, 760],
[1, 764],
[1, 768],
[1, 772],
[1, 776],
[1, 780],
[1, 784],
[1, 788],
[1, 792],
[1, 796],
[1, 800],
[1, 804],
[1, 808],
[1, 812],
[1, 816],
[1, 820],
[1, 824],
[1, 828],
[1, 832],
[1, 836],
[1, 840],
[1, 844],
[1, 848],
[1, 852],
[1, 856],
[1, 860],
[1, 864],
[1, 868],
[1, 872],
[1, 876],
[1, 880],
[1, 884],
[1, 888],
[1, 892],
[1, 896],
[1, 900],
[1, 904],
[1, 908],
[1, 912],
[1, 916],
[1, 920],
[1, 924],
[1, 928],
[1, 932],
[1, 936],
[1, 940],
[1, 944],
[1, 948],
[1, 952],
[1, 956],
[1, 960],
[1, 964],
[1, 968],
[1, 972],
[1, 976],
[1, 980],
[1, 984],
[1, 988],
[1, 992],
[1, 996],
[1, 1000],
[1, 1004],
[1, 1008],
[1, 1012],
[1, 1016],
[1, 1020],
[1, 1024],
[1, 1028],
[1, 1032],
[1, 1036],
[1, 1040],
[1, 1044],
[1, 1048],
[1, 1052],
[1, 1056],
[1, 1060],
[1, 1064],
[1, 1068],
[1, 1072],
[1, 1076],
[1, 1080],
[1, 1084],
[1, 1088],
[1, 1092],
[1, 1096],
[1, 1100],
[1, 1104],
[1, 1108],
[1, 1112],
[1, 1116],
[1, 1120],
[1, 1124],
[1, 1128],
[1, 1132],
[1, 1136],
[1, 1140],
[1, 1144],
[1, 1148],
[1, 1152],
[1, 1156],
[1, 1160],
[1, 1164],
[1, 1168],
[1, 1172],
[1, 1176],
[1, 1180],
[1, 1184],
[1, 1188],
[1, 1192],
[1, 1196],
[1, 1200],
[1, 1204],
[1, 1208],
[1, 1212],
[1, 1216],
[1, 1220],
[1, 1224],
[1, 1228],
[1, 1232],
[1, 1236],
[1, 1240],
[1, 1244],
[1, 1248],
[1, 1252],
[1, 1256],
[1, 1260],
[1, 1264],
[1, 1268],
[1, 1272],
[1, 1276],
[1, 1280],
[1, 1284],
[1, 1288],
[1, 1292],
[1, 1296],
[1, 1300],
[1, 1304],
[1, 1308],
[1, 1312],
[1, 1316],
[1, 1320],
[1, 1324],
[1, 1328],
[1, 1332],
[1, 1336],
[1, 1340],
[1, 1344],
[1, 1348],
[1, 1352],
[1, 1356],
[1, 1360],
[1, 1364],
[1, 1368],
[1, 1372],
[1, 1376],
[1, 1380],
[1, 1384],
[1, 1388],
[1, 1392],
[1, 1396],
[1, 1400],
[1, 1404],
[1, 1408],
[1, 1412],
[1, 1416],
[1, 1420],
[1, 1424],
[1, 1428],
[1, 1432],
[1, 1436],
[1, 1440],
[1, 1444],
[1, 1448],
[1, 1452],
[1, 1456],
[1, 1460],
[1, 1464],
[1, 1468],
[1, 1472],
[1, 1476],
[1, 1480],
[1, 1484],
[1, 1488],
[1, 1492],
[1, 1496],
[1, 1500],
[1, 1504],
[1, 1508],
[1, 1512],
[1, 1516],
[1, 1520],
[1, 1524],
[1, 1528],
[1, 1532],
[1, 1536],
[1, 1540],
[1, 1544],
[1, 1548],
[1, 1552],
[1, 1556],
[1, 1560],
[1, 1564],
[1, 1568],
[1, 1572],
[1, 1576],
[1, 1580],
[1, 1584],
[1, 1588],
[1, 1592],
[1, 1596],
[1, 1600],
[1, 1604],
[1, 1608],
[1, 1612],
[1, 1616],
[1, 1620],
[1, 1624],
[1, 1628],
[1, 1632],
[1, 1636],
[1, 1640],
[1, 1644],
[1, 1648],
[1, 1652],
[1, 1656],
[1, 1660],
[1, 1664],
[1, 1668],
[1, 1672],
[1, 1676],
[1, 1680],
[1, 1684],
[1, 1688],
[1, 1692],
[1, 1696],
[1, 1700],
[1, 1704],
[1, 1708],
[1, 1712],
[1, 1716],
[1, 1720],
[1, 1724],
[1, 1728],
[1, 1732],
[1, 1736],
[1, 1740],
[1, 1744],
[1, 1748],
[1, 1752],
[1, 1756],
[1, 1760],
[1, 1764],
[1, 1768],
[1, 1772],
[1, 1776],
[1, 1780],
[1, 1784],
[1, 1788],
[1, 1792],
[1, 1796],
[1, 1800],
[1, 1804],
[1, 1808],
[1, 1812],
[1, 1816],
[1, 1820],
[1, 1824],
[1, 1828],
[1, 1832],
[1, 1836],
[1, 1840],
[1, 1844],
[1, 1848],
[1, 1852],
[1, 1856],
[1, 1860],
[1, 1864],
[1, 1868],
[1, 1872],
[1, 1876],
[1, 1880],
[1, 1884],
[1, 1888],
[1, 1892],
[1, 1896],
[1, 1900],
[1, 1904],
[1, 1908],
[1, 1912],
[1, 1916],
[1, 1920],
[1, 1924],
[1, 1928],
[1, 1932],
[1, 1936],
[1, 1940],
[1, 1944],
[1, 1948],
[1, 1952],
[1, 1956],
[1, 1960],
[1, 1964],
[1, 1968],
[1, 1972],
[1, 1976],
[1, 1980],
[1, 1984],
[1, 1988],
[1, 1992],
[1, 1996],
[1, 2000],
[1, 2004],
[1, 2008],
[1, 2012],
[1, 2016],
[1, 2020],
[1, 2024],
[1, 2028],
[1, 2032],
[1, 2036],
[1, 2040],
[1, 2044],
[1, 2048],
[1, 2052],
[1, 2056],
[1, 2060],
[1, 2064],
[1, 2068],
[1, 2072],
[1, 2076],
[1, 2080],
[1, 2084],
[1, 2088],
[1, 2092],
[1, 2096],
[1, 2100],
[1, 2104],
[1, 2108],
[1, 2112],
[1, 2116],
[1, 2120],
[1, 2124],
[1, 2128],
[1, 2132],
[1, 2136],
[1, 2140],
[1, 2144],
[1, 2148],
[1, 2152],
[1, 2156],
[1, 2160],
[1, 2164],
[1, 2168],
[1, 2172],
[1, 2176],
[1, 2180],
[1, 2184],
[1, 2188],
[1, 2192],
[1, 2196],
[1, 2200],
[1, 2204],
[1, 2208],
[1, 2212],
[1, 2216],
[1, 2220],
[1, 2224],
[1, 2228],
[1, 2232],
[1, 2236],
[1, 2240],
[1, 2244],
[1, 2248],
[1, 2252],
[1, 2256],
[1, 2260],
[1, 2264],
[1, 2268],
[1, 2272],
[1, 2276],
[1, 2280],
[1, 2284],
[1, 2288],
[1, 2292],
[1, 2296],
[1, 2300],
[1, 2304],
[1, 2308],
[1, 2312],
[1, 2316],
[1, 2320],
[1, 2324],
[1, 2328],
[1, 2332],
[1, 2336],
[1, 2340],
[1, 2344],
[1, 2348],
[1, 2352],
[1, 2356],
[1, 2360],
[1, 2364],
[1, 2368],
[1, 2372],
[1, 2376],
[1, 2380],
[1, 2384],
[1, 2388],
[1, 2392],
[1, 2396],
[1, 2400],
[1, 2404],
[1, 2408],
[1, 2412],
[1, 2416],
[1, 2420],
[1, 2424],
[1, 2428],
[1, 2432],
[1, 2436],
[1, 2440],
[1, 2444],
[1, 2448],
[1, 2452],
[1, 2456],
[1, 2460],
[1, 2464],
[1, 2468],
[1, 2472],
[1, 2476],
[1, 2480],
[1, 2484],
[1, 2488],
[1, 2492],
[1, 2496],
[1, 2500],
[1, 2504],
[1, 2508],
[1, 2512],
[1, 2516],
[1, 2520],
[1, 2524],
[1, 2528],
[1, 2532],
[1, 2536],
[1, 2540],
[1, 2544],
[1, 2548],
[1, 2552],
[1, 2556],
[1, 2560],
[1, 2564],
[1, 2568],
[1, 2572],
[1, 2576],
[1, 2580],
[1, 2584],
[1, 2588],
[1, 2592],
[1, 2596],
[1, 2600],
[1, 2604],
[1, 2608],
[1, 2612],
[1, 2616],
[1, 2620],
[1, 2624],
[1, 2628],
[1, 2632],
[1, 2636],
[1, 2640],
[1, 2644],
[1, 2648],
[1, 2652],
[1, 2656],
[1, 2660],
[1, 2664],
[1, 2668],
[1, 2672],
[1, 2676],
[1, 2680],
[1, 2684],
[1, 2688],
[1, 2692],
[1, 2696],
[1, 2700],
[1, 2704],
[1, 2708],
[1, 2712],
[1, 2716],
[1, 2720],
[1, 2724],
[1, 2728],
[1, 2732],
[1, 2736],
[1, 2740],
[1, 2744],
[1, 2748],
[1, 2752],
[1, 2756],
[1, 2760],
[1, 2764],
[1, 2768],
[1, 2772],
[1, 2776],
[1, 2780],
[1, 2784],
[1, 2788],
[1, 2792],
[1, 2796],
[1, 2800],
[1, 2804],
[1, 2808],
[1, 2812],
[1, 2816],
[1, 2820],
[1, 2824],
[1, 2828],
[1, 2832],
[1, 2836],
[1, 2840],
[1, 2844],
[1, 2848],
[1, 2852],
[1, 2856],
[1, 2860],
[1, 2864],
[1, 2868],
[1, 2872],
[1, 2876],
[1, 2880],
[1, 2884],
[1, 2888],
[1, 2892],
[1, 2896],
[1, 2900],
[1, 2904],
[1, 2908],
[1, 2912],
[1, 2916],
[1, 2920],
[1, 2924],
[1, 2928],
[1, 2932],
[1, 2936],
[1, 2940],
[1, 2944],
[1, 2948],
[1, 2952],
[1, 2956],
[1, 2960],
[1, 2964],
[1, 2968],
[1, 2972],
[1, 2976],
[1, 2980],
[1, 2984],
[1, 2988],
[1, 2992],
[1, 2996],
[1, 3000],
[1, 3004],
[1, 3008],
[1, 3012],
[1, 3016],
[1, 3020],
[1, 3024],
[1, 3028],
[1, 3032],
[1, 3036],
[1, 3040],
[1, 3044],
[1, 3048],
[1, 3052],
[1, 3056],
[1, 3060],
[1, 3064],
[1, 3068],
[1, 3072],
[1, 3076],
[1, 3080],
[1, 3084],
[1, 3088],
[1, 3092],
[1, 3096],
[1, 3100],
[1, 3104],
[1, 3108],
[1, 3112],
[1, 3116],
[1, 3120],
[1, 3124],
[1, 3128],
[1, 3132],
[1, 3136],
[1, 3140],
[1, 3144],
[1, 3148],
[1, 3152],
[1, 3156],
[1, 3160],
[1, 3164],
[1, 3168],
[1, 3172],
[1, 3176],
[1, 3180],
[1, 3184],
[1, 3188],
[1, 3192],
[1, 3196],
[1, 3200],
[1, 3204],
[1, 3208],
[1, 3212],
[1, 3216],
[1, 3220],
[1, 3224],
[1, 3228],
[1, 3232],
[1, 3236],
[1, 3240],
[1, 3244],
[1, 3248],
[1, 3252],
[1, 3256],
[1, 3260],
[1, 3264],
[1, 3268],
[1, 3272],
[1, 3276],
[1, 3280],
[1, 3284],
[1, 3288],
[1, 3292],
[1, 3296],
[1, 3300],
[1, 3304],
[1, 3308],
[1, 3312],
[1, 3316],
[1, 3320],
[1, 3324],
[1, 3328],
[1, 3332],
[1, 3336],
[1, 3340],
[1, 3344],
[1, 3348],
[1, 3352],
[1, 3356],
[1, 3360],
[1, 3364],
[1, 3368],
[1, 3372],
[1, 3376],
[1, 3380],
[1, 3384],
[1, 3388],
[1, 3392],
[1, 3396],
[1, 3400],
[1, 3404],
[1, 3408],
[1, 3412],
[1, 3416],
[1, 3420],
[1, 3424],
[1, 3428],
[1, 3432],
[1, 3436],
[1, 3440],
[1, 3444],
[1, 3448],
[1, 3452],
[1, 3456],
[1, 3460],
[1, 3464],
[1, 3468],
[1, 3472],
[1, 3476],
[1, 3480],
[1, 3484],
[1, 3488],
[1, 3492],
[1, 3496],
[1, 3500],
[1, 3504],
[1, 3508],
[1, 3512],
[1, 3516],
[1, 3520],
[1, 3524],
[1, 3528],
[1, 3532],
[1, 3536],
[1, 3540],
[1, 3544],
[1, 3548],
[1, 3552],
[1, 3556],
[1, 3560],
[1, 3564],
[1, 3568],
[1, 3572],
[1, 3576],
[1, 3580],
[1, 3584],
[1, 3588],
[1, 3592],
[1, 3596],
[1, 3600],
[1, 3604],
[1, 3608],
[1, 3612],
[1, 3616],
[1, 3620],
[1, 3624],
[1, 3628],
[1, 3632],
[1, 3636],
[1, 3640],
[1, 3644],
[1, 3648],
[1, 3652],
[1, 3656],
[1, 3660],
[1, 3664],
[1, 3668],
[1, 3672],
[1, 3676],
[1, 3680],
[1, 3684],
[1, 3688],
[1, 3692],
[1, 3696],
[1, 3700],
[1, 3704],
[1, 3708],
[1, 3712],
[1, 3716],
[1, 3720],
[1, 3724],
[1, 3728],
[1, 3732],
[1, 3736],
[1, 3740],
[1, 3744],
[1, 3748],
[1, 3752],
[1, 3756],
[1, 3760],
[1, 3764],
[1, 3768],
[1, 3772],
[1, 3776],
[1, 3780],
[1, 3784],
[1, 3788],
[1, 3792],
[1, 3796],
[1, 3800],
[1, 3804],
[1, 3808],
[1, 3812],
[1, 3816],
[1, 3820],
[1, 3824],
[1, 3828],
[1, 3832],
[1, 3836],
[1, 3840],
[1, 3844],
[1, 3848],
[1, 3852],
[1, 3856],
[1, 3860],
[1, 3864],
[1, 3868],
[1, 3872],
[1, 3876],
[1, 3880],
[1, 3884],
[1, 3888],
[1, 3892],
[1, 3896],
[1, 3900],
[1, 3904],
[1, 3908],
[1, 3912],
[1, 3916],
[1, 3920],
[1, 3924],
[1, 3928],
[1, 3932],
[1, 3936],
[1, 3940],
[1, 3944],
[1, 3948],
[1, 3952],
[1, 3956],
[1, 3960],
[1, 3964],
[1, 3968],
[1, 3972],
[1, 3976],
[1, 3980],
[1, 3984],
[1, 3988],
[1, 3992],
[1, 3996],
[1, 4000],
[1, 4004],
[1, 4008],
[1, 4012],
[1, 4016],
[1, 4020],
[1, 4024],
[1, 4028],
[1, 4032],
[1, 4036],
[1, 4040],
[1, 4044],
[1, 4048],
[1, 4052],
[1, 4056],
[1, 4060],
[1, 4064],
[1, 4068],
[1, 4072],
[1, 4076],
[1, 4080],
[1, 4084],
[1, 4088],
[1, 4092],
[1, 4096],
[1, 4100],
[1, 4104],
[1, 4108],
[1, 4112],
[1, 4116],
[1, 4120],
[1, 4124],
[1, 4128],
[1, 4132],
[1, 4136],
[1, 4140],
[1, 4144],
[1, 4148],
[1, 4152],
[1, 4156],
[1, 4160],
[1, 4164],
[1, 4168],
[1, 4172],
[1, 4176],
[1, 4180],
[1, 4184],
[1, 4188],
[1, 4192],
[1, 4196],
[1, 4200],
[1, 4204],
[1, 4208],
[1, 4212],
[1, 4216],
[1, 4220],
[1, 4224],
[1, 4228],
[1, 4232],
[1, 4236],
[1, 4240],
[1, 4244],
[1, 4248],
[1, 4252],
[1, 4256],
[1, 4260],
[1, 4264],
[1, 4268],
[1, 4272],
[1, 4276],
[1, 4280],
[1, 4284],
[1, 4288],
[1, 4292],
[1, 4296],
[1, 4300],
[1, 4304],
[1, 4308],
[1, 4312],
[1, 4316],
[1, 4320],
[1, 4324],
[1, 4328],
[1, 4332],
[1, 4336],
[1, 4340],
[1, 4344],
[1, 4348],
[1, 4352],
[1, 4356],
[1, 4360],
[1, 4364],
[1, 4368],
[1, 4372],
[1, 4376],
[1, 4380],
[1, 4384],
[1, 4388],
[1, 4392],
[1, 4396],
[1, 4400],
[1, 4404],
[1, 4408],
[1, 4412],
[1, 4416],
[1, 4420],
[1, 4424],
[1, 4428],
[1, 4432],
[1, 4436],
[1, 4440],
[1, 4444],
[1, 4448],
[1, 4452],
[1, 4456],
[1, 4460],
[1, 4464],
[1, 4468],
[1, 4472],
[1, 4476],
[1, 4480],
[1, 4484],
[1, 4488],
[1, 4492],
[1, 4496],
[1, 4500],
[1, 4504],
[1, 4508],
[1, 4512],
[1, 4516],
[1, 4520],
[1, 4524],
[1, 4528],
[1, 4532],
[1, 4536],
[1, 4540],
[1, 4544],
[1, 4548],
[1, 4552],
[1, 4556],
[1, 4560],
[1, 4564],
[1, 4568],
[1, 4572],
[1, 4576],
[1, 4580],
[1, 4584],
[1, 4588],
[1, 4592],
[1, 4596],
[1, 4600],
[1, 4604],
[1, 4608],
[1, 4612],
[1, 4616],
[1, 4620],
[1, 4624],
[1, 4628],
[1, 4632],
[1, 4636],
[1, 4640],
[1, 4644],
[1, 4648],
[1, 4652],
[1, 4656],
[1, 4660],
[1, 4664],
[1, 4668],
[1, 4672],
[1, 4676],
[1, 4680],
[1, 4684],
[1, 4688],
[1, 4692],
[1, 4696],
[1, 4700],
[1, 4704],
[1, 4708],
[1, 4712],
[1, 4716],
[1, 4720],
[1, 4724],
[1, 4728],
[1, 4732],
[1, 4736],
[1, 4740],
[1, 4744],
[1, 4748],
[1, 4752],
[1, 4756],
[1, 4760],
[1, 4764],
[1, 4768],
[1, 4772],
[1, 4776],
[1, 4780],
[1, 4784],
[1, 4788],
[1, 4792],
[1, 4796],
[1, 4800],
[1, 4804],
[1, 4808],
[1, 4812],
[1, 4816],
[1, 4820],
[1, 4824],
[1, 4828],
[1, 4832],
[1, 4836],
[1, 4840],
[1, 4844],
[1, 4848],
[1, 4852],
[1, 4856],
[1, 4860],
[1, 4864],
[1, 4868],
[1, 4872],
[1, 4876],
[1, 4880],
[1, 4884],
[1, 4888],
[1, 4892],
[1, 4896],
[1, 4900],
[1, 4904],
[1, 4908],
[1, 4912],
[1, 4916],
[1, 4920],
[1, 4924],
[1, 4928],
[1, 4932],
[1, 4936],
[1, 4940],
[1, 4944],
[1, 4948],
[1, 4952],
[1, 4956],
[1, 4960],
[1, 4964],
[1, 4968],
[1, 4972],
[1, 4976],
[1, 4980],
[1, 4984],
[1, 4988],
[1, 4992],
[1, 4996],
[1, 5000],
[1, 5004],
[1, 5008],
[1, 5012],
[1, 5016],
[1, 5020],
[1, 5024],
[1, 5028],
[1, 5032],
[1, 5036],
[1, 5040],
[1, 5044],
[1, 5048],
[1, 5052],
[1, 5056],
[1, 5060],
[1, 5064],
[1, 5068],
[1, 5072],
[1, 5076],
[1, 5080],
[1, 5084],
[1, 5088],
[1, 5092],
[1, 5096],
[1, 5100],
[1, 5104],
[1, 5108],
[1, 5112],
[1, 5116],
[1, 5120],
[1, 5124],
[1, 5128],
[1, 5132],
[1, 5136],
[1, 5140],
[1, 5144],
[1, 5148],
[1, 5152],
[1, 5156],
[1, 5160],
[1, 5164],
[1, 5168],
[1, 5172],
[1, 5176],
[1, 5180],
[1, 5184],
[1, 5188],
[1, 5192],
[1, 5196],
[1, 5200],
[1, 5204],
[1, 5208],
[1, 5212],
[1, 5216],
[1, 5220],
[1, 5224],
[1, 5228],
[1, 5232],
[1, 5236],
[1, 5240],
[1, 5244],
[1, 5248],
[1, 5252],
[1, 5256],
[1, 5260],
[1, 5264],
[1, 5268],
[1, 5272],
[1, 5276],
[1, 5280],
[1, 5284],
[1, 5288],
[1, 5292],
[1, 5296],
[1, 5300],
[1, 5304],
[1, 5308],
[1, 5312],
[1, 5316],
[1, 5320],
[1, 5324],
[1, 5328],
[1, 5332],
[1, 5336],
[1, 5340],
[1, 5344],
[1, 5348],
[1, 5352],
[1, 5356],
[1, 5360],
[1, 5364],
[1, 5368],
[1, 5372],
[1, 5376],
[1, 5380],
[1, 5384],
[1, 5388],
[1, 5392],
[1, 5396],
[1, 5400],
[1, 5404],
[1, 5408],
[1, 5412],
[1, 5416],
[1, 5420],
[1, 5424],
[1, 5428],
[1, 5432],
[1, 5436],
[1, 5440],
[1, 5444],
[1, 5448],
[1, 5452],
[1, 5456],
[1, 5460],
[1, 5464],
[1, 5468],
[1, 5472],
[1, 5476],
[1, 5480],
[1, 5484],
[1, 5488],
[1, 5492],
[1, 5496],
[1, 5500],
[1, 5504],
[1, 5508],
[1, 5512],
[1, 5516],
[1, 5520],
[1, 5524],
[1, 5528],
[1, 5532],
[1, 5536],
[1, 5540],
[1, 5544],
[1, 5548],
[1, 5552],
[1, 5556],
[1, 5560],
[1, 5564],
[1, 5568],
[1, 5572],
[1, 5576],
[1, 5580],
[1, 5584],
[1, 5588],
[1, 5592],
[1, 5596],
[1, 5600],
[1, 5604],
[1, 5608],
[1, 5612],
[1, 5616],
[1, 5620],
[1, 5624],
[1, 5628],
[1, 5632],
[1, 5636],
[1, 5640],
[1, 5644],
[1, 5648],
[1, 5652],
[1, 5656],
[1, 5660],
[1, 5664],
[1, 5668],
[1, 5672],
[1, 5676],
[1, 5680],
[1, 5684],
[1, 5688],
[1, 5692],
[1, 5696],
[1, 5700],
[1, 5704],
[1, 5708],
[1, 5712],
[1, 5716],
[1, 5720],
[1, 5724],
[1, 5728],
[1, 5732],
[1, 5736],
[1, 5740],
[1, 5744],
[1, 5748],
[1, 5752],
[1, 5756],
[1, 5760],
[1, 5764],
[1, 5768],
[1, 5772],
[1, 5776],
[1, 5780],
[1, 5784],
[1, 5788],
[1, 5792],
[1, 5796],
[1, 5800],
[1, 5804],
[1, 5808],
[1, 5812],
[1, 5816],
[1, 5820],
[1, 5824],
[1, 5828],
[1, 5832],
[1, 5836],
[1, 5840],
[1, 5844],
[1, 5848],
[1, 5852],
[1, 5856],
[1, 5860],
[1, 5864],
[1, 5868],
[1, 5872],
[1, 5876],
[1, 5880],
[1, 5884],
[1, 5888],
[1, 5892],
[1, 5896],
[1, 5900],
[1, 5904],
[1, 5908],
[1, 5912],
[1, 5916],
[1, 5920],
[1, 5924],
[1, 5928],
[1, 5932],
[1, 5936],
[1, 5940],
[1, 5944],
[1, 5948],
[1, 5952],
[1, 5956],
[1, 5960],
[1, 5964],
[1, 5968],
[1, 5972],
[1, 5976],
[1, 5980],
[1, 5984],
[1, 5988],
[1, 5992],
[1, 5996],
[1, 6000],
[1, 6004],
[1, 6008],
[1, 6012],
[1, 6016],
[1, 6020],
[1, 6024],
[1, 6028],
[1, 6032],
[1, 6036],
[1, 6040],
[1, 6044],
[1, 6048],
[1, 6052],
[1, 6056],
[1, 6060],
[1, 6064],
[1, 6068],
[1, 6072],
[1, 6076],
[1, 6080],
[1, 6084],
[1, 6088],
[1, 6092],
[1, 6096],
[1, 6100],
[1, 6104],
[1, 6108],
[1, 6112],
[1, 6116],
[1, 6120],
[1, 6124],
[1, 6128],
[1, 6132],
[1, 6136],
[1, 6140],
[1, 6144],
[1, 6148],
[1, 6152],
[1, 6156],
[1, 6160],
[1, 6164],
[1, 6168],
[1, 6172],
[1, 6176],
[1, 6180],
[1, 6184],
[1, 6188],
[1, 6192],
[1, 6196],
[1, 6200],
[1, 6204],
[1, 6208],
[1, 6212],
[1, 6216],
[1, 6220],
[1, 6224],
[1, 6228],
[1, 6232],
[1, 6236],
[1, 6240],
[1, 6244],
[1, 6248],
[1, 6252],
[1, 6256],
[1, 6260],
[1, 6264],
[1, 6268],
[1, 6272],
[1, 6276],
[1, 6280],
[1, 6284],
[1, 6288],
[1, 6292],
[1, 6296],
[1, 6300],
[1, 6304],
[1, 6308],
[1, 6312],
[1, 6316],
[1, 6320],
[1, 6324],
[1, 6328],
[1, 6332],
[1, 6336],
[1, 6340],
[1, 6344],
[1, 6348],
[1, 6352],
[1, 6356],
[1, 6360],
[1, 6364],
[1, 6368],
[1, 6372],
[1, 6376],
[1, 6380],
[1, 6384],
[1, 6388],
[1, 6392],
[1, 6396],
[1, 6400],
[1, 6404],
[1, 6408],
[1, 6412],
[1, 6416],
[1, 6420],
[1, 6424],
[1, 6428],
[1, 6432],
[1, 6436],
[1, 6440],
[1, 6444],
[1, 6448],
[1, 6452],
[1, 6456],
[1, 6460],
[1, 6464],
[1, 6468],
[1, 6472],
[1, 6476],
[1, 6480],
[1, 6484],
[1, 6488],
[1, 6492],
[1, 6496],
[1, 6500],
[1, 6504],
[1, 6508],
[1, 6512],
[1, 6516],
[1, 6520],
[1, 6524],
[1, 6528],
[1, 6532],
[1, 6536],
[1, 6540],
[1, 6544],
[1, 6548],
[1, 6552],
[1, 6556],
[1, 6560],
[1, 6564],
[1, 6568],
[1, 6572],
[1, 6576],
[1, 6580],
[1, 6584],
[1, 6588],
[1, 6592],
[1, 6596],
[1, 6600],
[1, 6604],
[1, 6608],
[1, 6612],
[1, 6616],
[1, 6620],
[1, 6624],
[1, 6628],
[1, 6632],
[1, 6636],
[1, 6640],
[1, 6644],
[1, 6648],
[1, 6652],
[1, 6656],
[1, 6660],
[1, 6664],
[1, 6668],
[1, 6672],
[1, 6676],
[1, 6680],
[1, 6684],
[1, 6688],
[1, 6692],
[1, 6696],
[1, 6700],
[1, 6704],
[1, 6708],
[1, 6712],
[1, 6716],
[1, 6720],
[1, 6724],
[1, 6728],
[1, 6732],
[1, 6736],
[1, 6740],
[1, 6744],
[1, 6748],
[1, 6752],
[1, 6756],
[1, 6760],
[1, 6764],
[1, 6768],
[1, 6772],
[1, 6776],
[1, 6780],
[1, 6784],
[1, 6788],
[1, 6792],
[1, 6796],
[1, 6800],
[1, 6804],
[1, 6808],
[1, 6812],
[1, 6816],
[1, 6820],
[1, 6824],
[1, 6828],
[1, 6832],
[1, 6836],
[1, 6840],
[1, 6844],
[1, 6848],
[1, 6852],
[1, 6856],
[1, 6860],
[1, 6864],
[1, 6868],
[1, 6872],
[1, 6876],
[1, 6880],
[1, 6884],
[1, 6888],
[1, 6892],
[1, 6896],
[1, 6900],
[1, 6904],
[1, 6908],
[1, 6912],
[1, 6916],
[1, 6920],
[1, 6924],
[1, 6928],
[1, 6932],
[1, 6936],
[1, 6940],
[1, 6944],
[1, 6948],
[1, 6952],
[1, 6956],
[1, 6960],
[1, 6964],
[1, 6968],
[1, 6972],
[1, 6976],
[1, 6980],
[1, 6984],
[1, 6988],
[1, 6992],
[1, 6996],
[1, 7000],
[1, 7004],
[1, 7008],
[1, 7012],
[1, 7016],
[1, 7020],
[1, 7024],
[1, 7028],
[1, 7032],
[1, 7036],
[1, 7040],
[1, 7044],
[1, 7048],
[1, 7052],
[1, 7056],
[1, 7060],
[1, 7064],
[1, 7068],
[1, 7072],
[1, 7076],
[1, 7080],
[1, 7084],
[1, 7088],
[1, 7092],
[1, 7096],
[1, 7100],
[1, 7104],
[1, 7108],
[1, 7112],
[1, 7116],
[1, 7120],
[1, 7124],
[1, 7128],
[1, 7132],
[1, 7136],
[1, 7140],
[1, 7144],
[1, 7148],
[1, 7152],
[1, 7156],
[1, 7160],
[1, 7164],
[1, 7168],
[1, 7172],
[1, 7176],
[1, 7180],
[1, 7184],
[1, 7188],
[1, 7192],
[1, 7196],
[1, 7200],
[1, 7204],
[1, 7208],
[1, 7212],
[1, 7216],
[1, 7220],
[1, 7224],
[1, 7228],
[1, 7232],
[1, 7236],
[1, 7240],
[1, 7244],
[1, 7248],
[1, 7252],
[1, 7256],
[1, 7260],
[1, 7264],
[1, 7268],
[1, 7272],
[1, 7276],
[1, 7280],
[1, 7284],
[1, 7288],
[1, 7292],
[1, 7296],
[1, 7300],
[1, 7304],
[1, 7308],
[1, 7312],
[1, 7316],
[1, 7320],
[1, 7324],
[1, 7328],
[1, 7332],
[1, 7336],
[1, 7340],
[1, 7344],
[1, 7348],
[1, 7352],
[1, 7356],
[1, 7360],
[1, 7364],
[1, 7368],
[1, 7372],
[1, 7376],
[1, 7380],
[1, 7384],
[1, 7388],
[1, 7392],
[1, 7396],
[1, 7400],
[1, 7404],
[1, 7408],
[1, 7412],
[1, 7416],
[1, 7420],
[1, 7424],
[1, 7428],
[1, 7432],
[1, 7436],
[1, 7440],
[1, 7444],
[1, 7448],
[1, 7452],
[1, 7456],
[1, 7460],
[1, 7464],
[1, 7468],
[1, 7472],
[1, 7476],
[1, 7480],
[1, 7484],
[1, 7488],
[1, 7492],
[1, 7496],
[1, 7500],
[1, 7504],
[1, 7508],
[1, 7512],
[1, 7516],
[1, 7520],
[1, 7524],
[1, 7528],
[1, 7532],
[1, 7536],
[1, 7540],
[1, 7544],
[1, 7548],
[1, 7552],
[1, 7556],
[1, 7560],
[1, 7564],
[1, 7568],
[1, 7572],
[1, 7576],
[1, 7580],
[1, 7584],
[1, 7588],
[1, 7592],
[1, 7596],
[1, 7600],
[1, 7604],
[1, 7608],
[1, 7612],
[1, 7616],
[1, 7620],
[1, 7624],
[1, 7628],
[1, 7632],
[1, 7636],
[1, 7640],
[1, 7644],
[1, 7648],
[1, 7652],
[1, 7656],
[1, 7660],
[1, 7664],
[1, 7668],
[1, 7672],
[1, 7676],
[1, 7680],
[1, 7684],
[1, 7688],
[1, 7692],
[1, 7696],
[1, 7700],
[1, 7704],
[1, 7708],
[1, 7712],
[1, 7716],
[1, 7720],
[1, 7724],
[1, 7728],
[1, 7732],
[1, 7736],
[1, 7740],
[1, 7744],
[1, 7748],
[1, 7752],
[1, 7756],
[1, 7760],
[1, 7764],
[1, 7768],
[1, 7772],
[1, 7776],
[1, 7780],
[1, 7784],
[1, 7788],
[1, 7792],
[1, 7796],
[1, 7800]]
    return zip(*data)


def expected_wli01_500():
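    """Expected reference values; returns the two data columns via ``zip(*data)``."""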
# these data were generated by Mathematica
data = [
[0.059320231, 0.00E+00],
[0.061682819, 1.389370433],
[0.064077736, 2.781040708],
[0.066503509, 4.175049145],
[0.06895868, 5.571432985],
[0.071441811, 6.97022839],
[0.073951489, 8.371470457],
[0.076486328, 9.77519323],
[0.079044969, 11.18142971],
[0.081626085, 12.59021187],
[0.084228382, 14.00157069],
[0.086850598, 15.41553615],
[0.089491509, 16.83213725],
[0.092149923, 18.25140208],
[0.094824686, 19.67335777],
[0.09751468, 21.09803057],
[0.100218824, 22.52544585],
[0.102936073, 23.95562811],
[0.10566542, 25.38860104],
[0.108405892, 26.82438752],
[0.111156554, 28.26300964],
[0.113916503, 29.70448874],
[0.116684875, 31.14884542],
[0.119460837, 32.59609958],
[0.122243588, 34.04627042],
[0.125032364, 35.49937648],
[0.127826427, 36.95543566],
[0.130625075, 38.41446525],
[0.133427632, 39.87648191],
[0.136233453, 41.34150175],
[0.139041921, 42.80954032],
[0.141852444, 44.28061262],
[0.144664459, 45.75473315],
[0.147477427, 47.23191589],
[0.150290833, 48.71217435],
[0.153104188, 50.19552158],
[0.155917021, 51.68197018],
[0.158728888, 53.17153231],
[0.161539363, 54.66421972],
[0.16434804, 56.16004379],
[0.167154534, 57.65901547],
[0.169958478, 59.16114536],
[0.172759521, 60.66644373],
[0.175557332, 62.17492047],
[0.178351595, 63.68658515],
[0.18114201, 65.20144703],
[0.183928291, 66.71951508],
[0.186710167, 68.24079794],
[0.189487383, 69.76530399],
[0.192259693, 71.29304133],
[0.195026866, 72.82401781],
[0.197788685, 74.35824102],
[0.20054494, 75.89571831],
[0.203295436, 77.43645677],
[0.206039987, 78.98046331],
[0.208778416, 80.52774459],
[0.211510559, 82.07830708],
[0.214236257, 83.63215703],
[0.216955362, 85.18930051],
[0.219667736, 86.7497434],
[0.222373246, 88.3134914],
[0.225071768, 89.88055004],
[0.227763185, 91.45092466],
[0.230447389, 93.02462048],
[0.233124275, 94.60164253],
[0.235793747, 96.18199571],
[0.238455716, 97.76568476],
[0.241110095, 99.3527143],
[0.243756806, 100.9430888],
[0.246395776, 102.5368126],
[0.249026935, 104.1338899],
[0.251650219, 105.7343248],
[0.25426557, 107.3381213],
[0.256872931, 108.9452833],
[0.259472254, 110.5558146],
[0.26206349, 112.1697187],
[0.264646598, 113.7869992],
[0.267221538, 115.4076596],
[0.269788274, 117.0317033],
[0.272346774, 118.6591335],
[0.274897008, 120.2899535],
[0.277438951, 121.9241662],
[0.27997258, 123.5617747],
[0.282497873, 125.202782],
[0.285014814, 126.8471909],
[0.287523386, 128.4950041],
[0.290023577, 130.1462244],
[0.292515376, 131.8008545],
[0.294998775, 133.4588969],
[0.297473766, 135.120354],
[0.299940347, 136.7852284],
[0.302398514, 138.4535225],
[0.304848266, 140.1252384],
[0.307289605, 141.8003786],
[0.309722533, 143.4789451],
[0.312147055, 145.1609403],
[0.314563175, 146.846366],
[0.316970902, 148.5352245],
[0.319370243, 150.2275177],
[0.321761208, 151.9232475],
[0.324143809, 153.6224159],
[0.326518057, 155.3250246],
[0.328883965, 157.0310756],
[0.331241549, 158.7405706],
[0.333590822, 160.4535113],
[0.335931802, 162.1698995],
[0.338264505, 163.8897367],
[0.34058895, 165.6130246],
[0.342905155, 167.3397647],
[0.34521314, 169.0699586],
[0.347512925, 170.8036079],
[0.349804531, 172.5407139],
[0.35208798, 174.2812781],
[0.354363295, 176.0253019],
[0.356630497, 177.7727868],
[0.358889611, 179.523734],
[0.36114066, 181.2781448],
[0.36338367, 183.0360207],
[0.365618664, 184.7973627],
[0.367845669, 186.5621722],
[0.37006471, 188.3304503],
[0.372275814, 190.1021983],
[0.374479007, 191.8774173],
[0.376674316, 193.6561084],
[0.378861769, 195.4382728],
[0.381041392, 197.2239114],
[0.383213215, 199.0130255],
[0.385377264, 200.8056161],
[0.387533569, 202.601684],
[0.389682158, 204.4012305],
[0.391823061, 206.2042564],
[0.393956305, 208.0107627],
[0.396081921, 209.8207504],
[0.398199938, 211.6342203],
[0.400310385, 213.4511734],
[0.402413293, 215.2716106],
[0.404508691, 217.0955327],
[0.406596609, 218.9229406],
[0.408677077, 220.7538351],
[0.410750126, 222.5882171],
[0.412815785, 224.4260874],
[0.414874086, 226.2674467],
[0.416925058, 228.1122958],
[0.418968733, 229.9606355],
[0.42100514, 231.8124665],
[0.42303431, 233.6677896],
[0.425056275, 235.5266055],
[0.427071064, 237.3889149],
[0.429078709, 239.2547185],
[0.431079239, 241.1240169],
[0.433072686, 242.9968108],
[0.43505908, 244.873101],
[0.437038453, 246.752888],
[0.439010834, 248.6361724],
[0.440976254, 250.5229549],
[0.442934744, 252.4132362],
[0.444886335, 254.3070167],
[0.446831057, 256.2042971],
[0.44876894, 258.105078],
[0.450700015, 260.0093599],
[0.452624313, 261.9171434],
[0.454541863, 263.8284291],
[0.456452696, 265.7432174],
[0.458356843, 267.661509],
[0.460254332, 269.5833043],
[0.462145195, 271.5086038],
[0.464029462, 273.4374081],
[0.465907161, 275.3697176],
[0.467778324, 277.3055329],
[0.46964298, 279.2448544],
[0.471501158, 281.1876826],
[0.473352889, 283.1340179],
[0.475198201, 285.0838608],
[0.477037124, 287.0372118],
[0.478869688, 288.9940713],
[0.48069592, 290.9544397],
[0.482515852, 292.9183175],
[0.484329511, 294.885705],
[0.486136927, 296.8566028],
[0.487938128, 298.8310111],
[0.489733143, 300.8089304],
[0.491522001, 302.7903611],
[0.49330473, 304.7753036],
[0.495081358, 306.7637583],
[0.496851914, 308.7557255],
[0.498616425, 310.7512056],
[0.50037492, 312.750199],
[0.502127427, 314.752706],
[0.503873974, 316.7587269],
[0.505614587, 318.7682622],
[0.507349296, 320.7813122],
[0.509078126, 322.7978772],
[0.510801106, 324.8179575],
[0.512518262, 326.8415535],
[0.514229622, 328.8686654],
[0.515935213, 330.8992937],
[0.517635061, 332.9334385],
[0.519329193, 334.9711003],
[0.521017636, 337.0122793],
[0.522700416, 339.0569759],
[0.524377559, 341.1051902],
[0.526049092, 343.1569227],
[0.527715041, 345.2121735],
[0.529375431, 347.270943],
[0.531030289, 349.3332314],
[0.532679641, 351.3990391],
[0.534323511, 353.4683662],
[0.535961925, 355.5412131],
[0.537594909, 357.61758],
[0.539222488, 359.6974672],
[0.540844686, 361.7808749],
[0.54246153, 363.8678034],
[0.544073043, 365.9582528],
[0.54567925, 368.0522236],
[0.547280176, 370.1497158],
[0.548875846, 372.2507298],
[0.550466284, 374.3552657],
[0.552051513, 376.4633238],
[0.553631558, 378.5749043],
[0.555206444, 380.6900075],
[0.556776193, 382.8086335],
[0.55834083, 384.9307826],
[0.559900377, 387.056455],
[0.56145486, 389.1856509],
[0.5630043, 391.3183705],
[0.564548721, 393.454614],
[0.566088147, 395.5943816],
[0.5676226, 397.7376735],
[0.569152102, 399.88449],
[0.570676678, 402.0348311],
[0.572196349, 404.1886971],
[0.573711138, 406.3460881],
[0.575221067, 408.5070045],
[0.576726159, 410.6714463],
[0.578226435, 412.8394136],
[0.579721918, 415.0109068],
[0.581212629, 417.185926],
[0.582698591, 419.3644713],
[0.584179825, 421.5465429],
[0.585656353, 423.732141],
[0.587128195, 425.9212658],
[0.588595374, 428.1139173],
[0.59005791, 430.3100959],
[0.591515825, 432.5098015],
[0.592969139, 434.7130345],
[0.594417874, 436.9197949],
[0.59586205, 439.1300828],
[0.597301688, 441.3438986],
[0.598736808, 443.5612422],
[0.600167431, 445.7821138],
[0.601593577, 448.0065136],
[0.603015266, 450.2344418],
[0.604432518, 452.4658984],
[0.605845354, 454.7008836],
[0.607253792, 456.9393975],
[0.608657854, 459.1814403],
[0.610057558, 461.4270121],
[0.611452924, 463.6761131],
[0.612843972, 465.9287433],
[0.61423072, 468.1849029],
[0.615613188, 470.444592],
[0.616991396, 472.7078107],
[0.618365361, 474.9745592],
[0.619735104, 477.2448375],
[0.621100642, 479.5186459],
[0.622461995, 481.7959844],
[0.62381918, 484.0768531],
[0.625172218, 486.3612521],
[0.626521125, 488.6491816],
[0.627865921, 490.9406417],
[0.629206623, 493.2356324],
[0.630543249, 495.5341539],
[0.631875818, 497.8362063],
[0.633204347, 500.1417897],
[0.634528855, 502.4509041],
[0.635849358, 504.7635498],
[0.637165875, 507.0797267],
[0.638478422, 509.3994351],
[0.639787018, 511.7226749],
[0.64109168, 514.0494463],
[0.642392425, 516.3797494],
[0.643689269, 518.7135843],
[0.644982231, 521.050951],
[0.646271326, 523.3918496],
[0.647556572, 525.7362803],
[0.648837986, 528.0842432],
[0.650115583, 530.4357382],
[0.651389382, 532.7907655],
[0.652659397, 535.1493253],
[0.653925646, 537.5114174],
[0.655188145, 539.8770422],
[0.65644691, 542.2461995],
[0.657701958, 544.6188896],
[0.658953303, 546.9951124],
[0.660200962, 549.3748681],
[0.661444952, 551.7581567],
[0.662685287, 554.1449783],
[0.663921983, 556.535333],
[0.665155056, 558.9292208],
[0.666384522, 561.3266419],
[0.667610396, 563.7275963],
[0.668832693, 566.132084],
[0.670051429, 568.5401051],
[0.671266618, 570.9516597],
[0.672478275, 573.3667479],
[0.673686417, 575.7853697],
[0.674891057, 578.2075252],
[0.676092211, 580.6332144],
[0.677289893, 583.0624374],
[0.678484118, 585.4951943],
[0.6796749, 587.9314851],
[0.680862255, 590.3713099],
[0.682046196, 592.8146688],
[0.683226737, 595.2615617],
[0.684403894, 597.7119887],
[0.685577681, 600.16595],
[0.686748111, 602.6234455],
[0.687915198, 605.0844753],
[0.689078957, 607.5490395],
[0.690239401, 610.0171381],
[0.691396545, 612.4887711],
[0.692550401, 614.9639386],
[0.693700984, 617.4426407],
[0.694848308, 619.9248773],
[0.695992385, 622.4106486],
[0.69713323, 624.8999546],
[0.698270855, 627.3927953],
[0.699405275, 629.8891708],
[0.700536501, 632.389081],
[0.701664548, 634.8925262],
[0.702789429, 637.3995062],
[0.703911156, 639.9100211],
[0.705029742, 642.424071],
[0.706145201, 644.9416559],
[0.707257545, 647.4627758],
[0.708366788, 649.9874308],
[0.70947294, 652.515621],
[0.710576016, 655.0473462],
[0.711676028, 657.5826066],
[0.712772989, 660.1214022],
[0.71386691, 662.6637331],
[0.714957804, 665.2095992],
[0.716045683, 667.7590006],
[0.71713056, 670.3119373],
[0.718212447, 672.8684094],
[0.719291356, 675.4284169],
[0.720367299, 677.9919597],
[0.721440287, 680.559038],
[0.722510334, 683.1296517],
[0.723577449, 685.7038009],
[0.724641647, 688.2814856],
[0.725702937, 690.8627058],
[0.726761332, 693.4474616],
[0.727816844, 696.0357529],
[0.728869483, 698.6275797],
[0.729919262, 701.2229422],
[0.730966191, 703.8218403],
[0.732010282, 706.424274],
[0.733051547, 709.0302434],
[0.734089997, 711.6397484],
[0.735125642, 714.2527891],
[0.736158494, 716.8693655],
[0.737188564, 719.4894775],
[0.738215864, 722.1131253],
[0.739240403, 724.7403089],
[0.740262192, 727.3710281],
[0.741281244, 730.0052831],
[0.742297568, 732.6430739],
[0.743311174, 735.2844004],
[0.744322075, 737.9292627],
[0.74533028, 740.5776608],
[0.7463358, 743.2295946],
[0.747338645, 745.8850642],
[0.748338826, 748.5440697],
[0.749336353, 751.2066109],
[0.750331237, 753.8726879],
[0.751323488, 756.5423007],
[0.752313116, 759.2154493],
[0.75330013, 761.8921338],
[0.754284543, 764.572354],
[0.755266362, 767.25611],
[0.756245599, 769.9434018],
[0.757222264, 772.6342295],
[0.758196365, 775.3285929],
[0.759167914, 778.0264921],
[0.76013692, 780.727927],
[0.761103392, 783.4328978],
[0.762067341, 786.1414043],
[0.763028776, 788.8534466],
[0.763987707, 791.5690247],
[0.764944143, 794.2881384],
[0.765898093, 797.010788],
[0.766849568, 799.7369732],
[0.767798577, 802.4666942],
[0.768745128, 805.1999508],
[0.769689232, 807.9367432],
[0.770630897, 810.6770712],
[0.771570134, 813.4209349],
[0.77250695, 816.1683343],
[0.773441355, 818.9192692],
[0.774373358, 821.6737398],
[0.775302969, 824.431746],
[0.776230196, 827.1932877],
[0.777155048, 829.9583651],
[0.778077534, 832.7269779],
[0.778997663, 835.4991263],
[0.779915444, 838.2748101],
[0.780830885, 841.0540294],
[0.781743996, 843.8367842],
[0.782654784, 846.6230744],
[0.783563259, 849.4129],
[0.784469429, 852.206261],
[0.785373303, 855.0031573],
[0.786274889, 857.803589],
[0.787174195, 860.6075559],
[0.788071231, 863.4150581],
[0.788966004, 866.2260955],
[0.789858522, 869.0406681],
[0.790748795, 871.8587759],
[0.79163683, 874.6804188],
[0.792522635, 877.5055969],
[0.793406219, 880.33431],
[0.79428759, 883.1665581],
[0.795166756, 886.0023412],
[0.796043724, 888.8416593],
[0.796918504, 891.6845123],
[0.797791102, 894.5309002],
[0.798661527, 897.3808229],
[0.799529787, 900.2342804],
[0.800395889, 903.0912727],
[0.801259841, 905.9517997],
[0.802121652, 908.8158614],
[0.802981328, 911.6834576],
[0.803838878, 914.5545885],
[0.804694309, 917.4292539],
[0.805547629, 920.3074538],
[0.806398845, 923.1891882],
[0.807247965, 926.0744569],
[0.808094996, 928.96326],
[0.808939946, 931.8555973],
[0.809782822, 934.7514689],
[0.810623632, 937.6508747],
[0.811462383, 940.5538146],
[0.812299082, 943.4602886],
[0.813133736, 946.3702966],
[0.813966354, 949.2838385],
[0.814796941, 952.2009144],
[0.815625505, 955.121524],
[0.816452054, 958.0456675],
[0.817276594, 960.9733447],
[0.818099132, 963.9045555],
[0.818919676, 966.8393],
[0.819738233, 969.7775779],
[0.820554808, 972.7193893],
[0.82136941, 975.6647341],
[0.822182045, 978.6136123],
[0.82299272, 981.5660236],
[0.823801442, 984.5219682],
[0.824608218, 987.4814459],
[0.825413054, 990.4444566],
[0.826215957, 993.4110003],
[0.827016934, 996.3810768],
[0.827815991, 999.3546862],
[0.828613135, 1002.331828],
[0.829408373, 1005.312503],
[0.830201711, 1008.29671],
[0.830993155, 1011.28445],
[0.831782713, 1014.275722],
[0.83257039, 1017.270527],
[0.833356194, 1020.268864],
[0.834140129, 1023.270733],
[0.834922204, 1026.276134],
[0.835702423, 1029.285067],
[0.836480794, 1032.297532],
[0.837257322, 1035.313528],
[0.838032014, 1038.333056],
[0.838804876, 1041.356116],
[0.839575914, 1044.382708],
[0.840345135, 1047.412831],
[0.841112544, 1050.446485],
[0.841878147, 1053.48367],
[0.842641951, 1056.524387],
[0.843403961, 1059.568634],
[0.844164184, 1062.616412],
[0.844922625, 1065.667721],
[0.845679291, 1068.722561],
[0.846434186, 1071.780932],
[0.847187318, 1074.842832],
[0.847938692, 1077.908263],
[0.848688313, 1080.977225],
[0.849436188, 1084.049716],
[0.850182322, 1087.125738],
[0.850926721, 1090.205289],
[0.85166939, 1093.28837],
[0.852410335, 1096.374981],
[0.853149563, 1099.465121],
[0.853887077, 1102.55879],
[0.854622885, 1105.655989],
[0.855356991, 1108.756717],
[0.856089401, 1111.860974],
[0.856820121, 1114.968759],
[0.857549155, 1118.080074],
[0.85827651, 1121.194917],
[0.85900219, 1124.313288],
[0.859726202, 1127.435187],
[0.860448549, 1130.560615],
[0.861169239, 1133.689571],
[0.861888275, 1136.822054],
[0.862605664, 1139.958065],
[0.863321409, 1143.097604],
[0.864035518, 1146.24067],
[0.864747994, 1149.387264],
[0.865458843, 1152.537384],
[0.86616807, 1155.691031],
[0.86687568, 1158.848205],
[0.867581678, 1162.008906],
[0.868286069, 1165.173132],
[0.868988858, 1168.340886],
[0.86969005, 1171.512165],
[0.87038965, 1174.68697],
[0.871087663, 1177.8653],
[0.871784093, 1181.047157],
[0.872478946, 1184.232538],
[0.873172227, 1187.421445],
[0.87386394, 1190.613876],
[0.874554089, 1193.809832],
[0.875242681, 1197.009313],
[0.875929718, 1200.212318],
[0.876615207, 1203.418848],
[0.877299152, 1206.628901],
[0.877981557, 1209.842478],
[0.878662427, 1213.059578],
[0.879341766, 1216.280202],
[0.880019579, 1219.504349],
[0.880695871, 1222.732019],
[0.881370646, 1225.963211],
[0.882043909, 1229.197926],
[0.882715664, 1232.436163],
[0.883385914, 1235.677922],
[0.884054666, 1238.923202],
[0.884721923, 1242.172004],
[0.885387689, 1245.424328],
[0.886051969, 1248.680172],
[0.886714767, 1251.939537],
[0.887376087, 1255.202422],
[0.888035933, 1258.468828],
[0.88869431, 1261.738754],
[0.889351222, 1265.012199],
[0.890006672, 1268.289164],
[0.890660666, 1271.569648],
[0.891313206, 1274.853651],
[0.891964298, 1278.141172],
[0.892613944, 1281.432212],
[0.89326215, 1284.726769],
[0.893908918, 1288.024845],
[0.894554254, 1291.326437],
[0.89519816, 1294.631547],
[0.895840641, 1297.940174],
[0.8964817, 1301.252317],
[0.897121342, 1304.567976],
[0.89775957, 1307.887151],
[0.898396387, 1311.209841],
[0.899031798, 1314.536047],
[0.899665806, 1317.865768],
[0.900298415, 1321.199002],
[0.900929629, 1324.535751],
[0.90155945, 1327.876014],
[0.902187883, 1331.21979],
[0.902814931, 1334.56708],
[0.903440598, 1337.917881],
[0.904064886, 1341.272195],
[0.904687801, 1344.630021],
[0.905309344, 1347.991359],
[0.905929519, 1351.356207],
[0.90654833, 1354.724566],
[0.90716578, 1358.096436],
[0.907781872, 1361.471815],
[0.908396609, 1364.850704],
[0.909009995, 1368.233101],
[0.909622032, 1371.619008],
[0.910232724, 1375.008422],
[0.910842074, 1378.401344],
[0.911450085, 1381.797773],
[0.91205676, 1385.197709],
[0.912662102, 1388.60115],
[0.913266114, 1392.008098],
[0.913868799, 1395.418551],
[0.914470159, 1398.832509],
[0.915070197, 1402.24997],
[0.915668917, 1405.670936],
[0.916266321, 1409.095404],
[0.916862412, 1412.523376],
[0.917457192, 1415.954849],
[0.918050663, 1419.389824],
[0.91864283, 1422.828299],
[0.919233693, 1426.270275],
[0.919823256, 1429.715751],
[0.920411521, 1433.164726],
[0.92099849, 1436.617199],
[0.921584167, 1440.07317],
[0.922168552, 1443.532639],
[0.922751648, 1446.995604],
[0.923333458, 1450.462065],
[0.923913984, 1453.932022],
[0.924493228, 1457.405473],
[0.925071191, 1460.882417],
[0.925647876, 1464.362855],
[0.926223285, 1467.846786],
[0.92679742, 1471.334208],
[0.927370282, 1474.825121],
[0.927941874, 1478.319525],
[0.928512197, 1481.817418],
[0.929081253, 1485.318799],
[0.929649042, 1488.823668],
[0.930215568, 1492.332024],
[0.930780832, 1495.843867],
[0.931344834, 1499.359194],
[0.931907576, 1502.878006],
[0.93246906, 1506.400302],
[0.933029286, 1509.92608],
[0.933588257, 1513.45534],
[0.934145972, 1516.98808],
[0.934702433, 1520.5243],
[0.935257642, 1524.063999],
[0.935811597, 1527.607176],
[0.936364302, 1531.153829],
[0.936915755, 1534.703958],
[0.937465959, 1538.257561],
[0.938014913, 1541.814638],
[0.938562617, 1545.375188],
[0.939109073, 1548.939208],
[0.93965428, 1552.506699],
[0.940198238, 1556.077658],
[0.940740948, 1559.652085],
[0.94128241, 1563.229978],
[0.941822623, 1566.811336],
[0.942361587, 1570.396158],
[0.942899302, 1573.984443],
[0.943435767, 1577.576188],
[0.943970982, 1581.171394],
[0.944504946, 1584.770057],
[0.945037658, 1588.372177],
[0.945569117, 1591.977753],
[0.946099323, 1595.586782],
[0.946628273, 1599.199264],
[0.947155967, 1602.815196],
[0.947682402, 1606.434577],
[0.948207579, 1610.057405],
[0.948731493, 1613.683679],
[0.949254144, 1617.313397],
[0.94977553, 1620.946556],
[0.950295648, 1624.583156],
[0.950814495, 1628.223195],
[0.95133207, 1631.866669],
[0.951848368, 1635.513578],
[0.952363388, 1639.163919],
[0.952877126, 1642.817691],
[0.953389578, 1646.474891],
[0.953900741, 1650.135516],
[0.954410611, 1653.799566],
[0.954919183, 1657.467036],
[0.955426455, 1661.137926],
[0.95593242, 1664.812233],
[0.956437074, 1668.489953],
[0.956940412, 1672.171085],
[0.957442429, 1675.855626],
[0.957943119, 1679.543574],
[0.958442476, 1683.234925],
[0.958940494, 1686.929677],
[0.959437166, 1690.627826],
[0.959932486, 1694.32937],
[0.960426446, 1698.034306],
[0.960919038, 1701.742631],
[0.961410255, 1705.454341],
[0.961900088, 1709.169433],
[0.962388529, 1712.887904],
[0.962875569, 1716.609749],
[0.963361197, 1720.334966],
[0.963845405, 1724.063551],
[0.964328181, 1727.7955],
[0.964809515, 1731.530809],
[0.965289396, 1735.269473],
[0.96576781, 1739.011489],
[0.966244747, 1742.756853],
[0.966720193, 1746.505559],
[0.967194134, 1750.257604],
[0.967666556, 1754.012982],
[0.968137445, 1757.77169],
[0.968606785, 1761.533721],
[0.96907456, 1765.29907],
[0.969540753, 1769.067733],
[0.970005346, 1772.839704],
[0.970468322, 1776.614977],
[0.970929661, 1780.393547],
[0.971389343, 1784.175407],
[0.971847347, 1787.960551],
[0.972303652, 1791.748973],
[0.972758236, 1795.540665],
[0.973211074, 1799.335623],
[0.973662143, 1803.133837],
[0.974111416, 1806.935302],
[0.974558867, 1810.740008],
[0.975004468, 1814.54795],
[0.97544819, 1818.359118],
[0.975890004, 1822.173505],
[0.976329877, 1825.991101],
[0.976767777, 1829.811898],
[0.97720367, 1833.635887],
[0.97763752, 1837.463058],
[0.97806929, 1841.293401],
[0.978498943, 1845.126907],
[0.978926437, 1848.963565],
[0.979351731, 1852.803363],
[0.979774782, 1856.646291],
[0.980195544, 1860.492336],
[0.980613971, 1864.341488],
[0.981030013, 1868.193733],
[0.98144362, 1872.049058],
[0.981854739, 1875.907451],
[0.982263316, 1879.768896],
[0.982669293, 1883.633381],
[0.98307261, 1887.500889],
[0.983473207, 1891.371406],
[0.98387102, 1895.244916],
[0.984265982, 1899.121402],
[0.984658026, 1903.000846],
[0.985047079, 1906.883232],
[0.985433069, 1910.76854],
[0.985815918, 1914.656752],
[0.986195549, 1918.547849],
[0.98657188, 1922.441809],
[0.986944826, 1926.338611],
[0.9873143, 1930.238234],
[0.987680214, 1934.140654],
[0.988042474, 1938.045849],
[0.988400986, 1941.953794],
[0.988755651, 1945.864464],
[0.989106369, 1949.777833],
[0.989453038, 1953.693873],
[0.989795551, 1957.612558],
[0.990133801, 1961.533858],
[0.990467677, 1965.457743],
[0.990797067, 1969.384183],
[0.991121855, 1973.313147],
[0.991441926, 1977.2446],
[0.991757162, 1981.17851],
[0.992067443, 1985.114842],
[0.992372649, 1989.05356],
[0.992672658, 1992.994626],
[0.992967349, 1996.938002],
[0.9932566, 2000.883651],
[0.993540291, 2004.83153],
[0.993818301, 2008.7816],
[0.99409051, 2012.733817],
[0.994356803, 2016.688138],
[0.994617064, 2020.644518],
[0.994871183, 2024.602913],
[0.995119053, 2028.563275],
[0.995360571, 2032.525556],
[0.99559564, 2036.48971],
[0.995824168, 2040.455685],
[0.996046071, 2044.423433],
[0.996261273, 2048.392902],
[0.996469704, 2052.36404],
[0.996671307, 2056.336796],
[0.996866031, 2060.311116],
[0.997053838, 2064.286947],
[0.997234702, 2068.264235],
[0.997408607, 2072.242927],
[0.997575552, 2076.222969],
[0.997735549, 2080.204305],
[0.997888621, 2084.186883],
[0.99803481, 2088.170647],
[0.998174168, 2092.155545],
[0.998306765, 2096.141522],
[0.998432682, 2100.128527],
[0.99855202, 2104.116506],
[0.998664889, 2108.105409],
[0.998771416, 2112.095185],
[0.998871742, 2116.085784],
[0.99896602, 2120.077157],
[0.999054414, 2124.069259],
[0.999137102, 2128.062042],
[0.99921427, 2132.055461],
[0.999286116, 2136.049475],
[0.999352844, 2140.044042],
[0.999414664, 2144.039121],
[0.999471796, 2148.034675],
[0.999524458, 2152.030667],
[0.999572877, 2156.027062],
[0.999617278, 2160.023828],
[0.999657885, 2164.020934],
[0.999694925, 2168.018349],
[0.99972862, 2172.016046],
[0.999759187, 2176.014001],
[0.999786842, 2180.012188],
[0.999811792, 2184.010584],
[0.99983424, 2188.009171],
[0.999854379, 2192.007927],
[0.999872397, 2196.006836],
[0.999888472, 2200.005881],
[0.999902771, 2204.005047],
[0.999915456, 2208.004321],
[0.999926676, 2212.003691],
[0.999936572, 2216.003145],
[0.999945275, 2220.002673],
[0.999952907, 2224.002266],
[0.999959581, 2228.001917],
[0.9999654, 2232.001617],
[0.999970459, 2236.001361],
[0.999974845, 2240.001143],
[0.999978636, 2244.000957],
[0.999981904, 2248.0008],
[0.999984712, 2252.000666],
[0.999987119, 2256.000554],
[0.999989175, 2260.000459],
[0.999990928, 2264.00038],
[0.999992416, 2268.000313],
[0.999993678, 2272.000258],
[0.999994743, 2276.000212],
[0.999995641, 2280.000173],
[0.999996395, 2284.000142],
[0.999997026, 2288.000115],
[0.999997553, 2292.000094],
[0.999997993, 2296.000076],
[0.999998357, 2300.000061],
[0.999998659, 2304.00005],
[0.999998909, 2308.00004],
[0.999999114, 2312.000032],
[0.999999283, 2316.000026],
[0.999999421, 2320.00002],
[0.999999534, 2324.000016],
[0.999999625, 2328.000013],
[0.9999997, 2332.00001],
[0.99999976, 2336.000008],
[0.999999809, 2340.000006],
[0.999999848, 2344.000005],
[0.99999988, 2348.000004],
[0.999999905, 2352.000003],
[0.999999925, 2356.000002],
[0.999999941, 2360.000002],
[0.999999954, 2364.000001],
[0.999999964, 2368.000001],
[0.999999972, 2372.000001],
[0.999999978, 2376.000001],
[0.999999983, 2380.000001],
[0.999999987, 2384],
[0.99999999, 2388],
[0.999999992, 2392],
[0.999999994, 2396],
[0.999999995, 2400],
[0.999999996, 2404],
[0.999999997, 2408],
[0.999999998, 2412],
[0.999999998, 2416],
[0.999999999, 2420],
[0.999999999, 2424],
[0.999999999, 2428],
[1, 2432],
[1, 2436],
[1, 2440],
[1, 2444],
[1, 2448],
[1, 2452],
[1, 2456],
[1, 2460],
[1, 2464],
[1, 2468],
[1, 2472],
[1, 2476],
[1, 2480],
[1, 2484],
[1, 2488],
[1, 2492],
[1, 2496],
[1, 2500],
[1, 2504],
[1, 2508],
[1, 2512],
[1, 2516],
[1, 2520],
[1, 2524],
[1, 2528],
[1, 2532],
[1, 2536],
[1, 2540],
[1, 2544],
[1, 2548],
[1, 2552],
[1, 2556],
[1, 2560],
[1, 2564],
[1, 2568],
[1, 2572],
[1, 2576],
[1, 2580],
[1, 2584],
[1, 2588],
[1, 2592],
[1, 2596],
[1, 2600],
[1, 2604],
[1, 2608],
[1, 2612],
[1, 2616],
[1, 2620],
[1, 2624],
[1, 2628],
[1, 2632],
[1, 2636],
[1, 2640],
[1, 2644],
[1, 2648],
[1, 2652],
[1, 2656],
[1, 2660],
[1, 2664],
[1, 2668],
[1, 2672],
[1, 2676],
[1, 2680],
[1, 2684],
[1, 2688],
[1, 2692],
[1, 2696],
[1, 2700],
[1, 2704],
[1, 2708],
[1, 2712],
[1, 2716],
[1, 2720],
[1, 2724],
[1, 2728],
[1, 2732],
[1, 2736],
[1, 2740],
[1, 2744],
[1, 2748],
[1, 2752],
[1, 2756],
[1, 2760],
[1, 2764],
[1, 2768],
[1, 2772],
[1, 2776],
[1, 2780],
[1, 2784],
[1, 2788],
[1, 2792],
[1, 2796],
[1, 2800],
[1, 2804],
[1, 2808],
[1, 2812],
[1, 2816],
[1, 2820],
[1, 2824],
[1, 2828],
[1, 2832],
[1, 2836],
[1, 2840],
[1, 2844],
[1, 2848],
[1, 2852],
[1, 2856],
[1, 2860],
[1, 2864],
[1, 2868],
[1, 2872],
[1, 2876],
[1, 2880],
[1, 2884],
[1, 2888],
[1, 2892],
[1, 2896],
[1, 2900],
[1, 2904],
[1, 2908],
[1, 2912],
[1, 2916],
[1, 2920],
[1, 2924],
[1, 2928],
[1, 2932],
[1, 2936],
[1, 2940],
[1, 2944],
[1, 2948],
[1, 2952],
[1, 2956],
[1, 2960],
[1, 2964],
[1, 2968],
[1, 2972],
[1, 2976],
[1, 2980],
[1, 2984],
[1, 2988],
[1, 2992],
[1, 2996],
[1, 3000],
[1, 3004],
[1, 3008],
[1, 3012],
[1, 3016],
[1, 3020],
[1, 3024],
[1, 3028],
[1, 3032],
[1, 3036],
[1, 3040],
[1, 3044],
[1, 3048],
[1, 3052],
[1, 3056],
[1, 3060],
[1, 3064],
[1, 3068],
[1, 3072],
[1, 3076],
[1, 3080],
[1, 3084],
[1, 3088],
[1, 3092],
[1, 3096],
[1, 3100],
[1, 3104],
[1, 3108],
[1, 3112],
[1, 3116],
[1, 3120],
[1, 3124],
[1, 3128],
[1, 3132],
[1, 3136],
[1, 3140],
[1, 3144],
[1, 3148],
[1, 3152],
[1, 3156],
[1, 3160],
[1, 3164],
[1, 3168],
[1, 3172],
[1, 3176],
[1, 3180],
[1, 3184],
[1, 3188],
[1, 3192],
[1, 3196],
[1, 3200],
[1, 3204],
[1, 3208],
[1, 3212],
[1, 3216],
[1, 3220],
[1, 3224],
[1, 3228],
[1, 3232],
[1, 3236],
[1, 3240],
[1, 3244],
[1, 3248],
[1, 3252],
[1, 3256],
[1, 3260],
[1, 3264],
[1, 3268],
[1, 3272],
[1, 3276],
[1, 3280],
[1, 3284],
[1, 3288],
[1, 3292],
[1, 3296],
[1, 3300],
[1, 3304],
[1, 3308],
[1, 3312],
[1, 3316],
[1, 3320],
[1, 3324],
[1, 3328],
[1, 3332],
[1, 3336],
[1, 3340],
[1, 3344],
[1, 3348],
[1, 3352],
[1, 3356],
[1, 3360],
[1, 3364],
[1, 3368],
[1, 3372],
[1, 3376],
[1, 3380],
[1, 3384],
[1, 3388],
[1, 3392],
[1, 3396],
[1, 3400],
[1, 3404],
[1, 3408],
[1, 3412],
[1, 3416],
[1, 3420],
[1, 3424],
[1, 3428],
[1, 3432],
[1, 3436],
[1, 3440],
[1, 3444],
[1, 3448],
[1, 3452],
[1, 3456],
[1, 3460],
[1, 3464],
[1, 3468],
[1, 3472],
[1, 3476],
[1, 3480],
[1, 3484],
[1, 3488],
[1, 3492],
[1, 3496],
[1, 3500],
[1, 3504],
[1, 3508],
[1, 3512],
[1, 3516],
[1, 3520],
[1, 3524],
[1, 3528],
[1, 3532],
[1, 3536],
[1, 3540],
[1, 3544],
[1, 3548],
[1, 3552],
[1, 3556],
[1, 3560],
[1, 3564],
[1, 3568],
[1, 3572],
[1, 3576],
[1, 3580],
[1, 3584],
[1, 3588],
[1, 3592],
[1, 3596],
[1, 3600],
[1, 3604],
[1, 3608],
[1, 3612],
[1, 3616],
[1, 3620],
[1, 3624],
[1, 3628],
[1, 3632],
[1, 3636],
[1, 3640],
[1, 3644],
[1, 3648],
[1, 3652],
[1, 3656],
[1, 3660],
[1, 3664],
[1, 3668],
[1, 3672],
[1, 3676],
[1, 3680],
[1, 3684],
[1, 3688],
[1, 3692],
[1, 3696],
[1, 3700],
[1, 3704],
[1, 3708],
[1, 3712],
[1, 3716],
[1, 3720],
[1, 3724],
[1, 3728],
[1, 3732],
[1, 3736],
[1, 3740],
[1, 3744],
[1, 3748],
[1, 3752],
[1, 3756],
[1, 3760],
[1, 3764],
[1, 3768],
[1, 3772],
[1, 3776],
[1, 3780],
[1, 3784],
[1, 3788],
[1, 3792],
[1, 3796],
[1, 3800],
[1, 3804],
[1, 3808],
[1, 3812],
[1, 3816],
[1, 3820],
[1, 3824],
[1, 3828],
[1, 3832],
[1, 3836],
[1, 3840],
[1, 3844],
[1, 3848],
[1, 3852],
[1, 3856],
[1, 3860],
[1, 3864],
[1, 3868],
[1, 3872],
[1, 3876],
[1, 3880],
[1, 3884],
[1, 3888],
[1, 3892],
[1, 3896],
[1, 3900],
[1, 3904],
[1, 3908],
[1, 3912],
[1, 3916],
[1, 3920],
[1, 3924],
[1, 3928],
[1, 3932],
[1, 3936],
[1, 3940],
[1, 3944],
[1, 3948],
[1, 3952],
[1, 3956],
[1, 3960],
[1, 3964],
[1, 3968],
[1, 3972],
[1, 3976],
[1, 3980],
[1, 3984],
[1, 3988],
[1, 3992],
[1, 3996],
[1, 4000],
[1, 4004],
[1, 4008],
[1, 4012],
[1, 4016],
[1, 4020],
[1, 4024],
[1, 4028],
[1, 4032],
[1, 4036],
[1, 4040],
[1, 4044],
[1, 4048],
[1, 4052],
[1, 4056],
[1, 4060],
[1, 4064],
[1, 4068],
[1, 4072],
[1, 4076],
[1, 4080],
[1, 4084],
[1, 4088],
[1, 4092],
[1, 4096],
[1, 4100],
[1, 4104],
[1, 4108],
[1, 4112],
[1, 4116],
[1, 4120],
[1, 4124],
[1, 4128],
[1, 4132],
[1, 4136],
[1, 4140],
[1, 4144],
[1, 4148],
[1, 4152],
[1, 4156],
[1, 4160],
[1, 4164],
[1, 4168],
[1, 4172],
[1, 4176],
[1, 4180],
[1, 4184],
[1, 4188],
[1, 4192],
[1, 4196],
[1, 4200],
[1, 4204],
[1, 4208],
[1, 4212],
[1, 4216],
[1, 4220],
[1, 4224],
[1, 4228],
[1, 4232],
[1, 4236],
[1, 4240],
[1, 4244],
[1, 4248],
[1, 4252],
[1, 4256],
[1, 4260],
[1, 4264],
[1, 4268],
[1, 4272],
[1, 4276],
[1, 4280],
[1, 4284],
[1, 4288],
[1, 4292],
[1, 4296],
[1, 4300],
[1, 4304],
[1, 4308],
[1, 4312],
[1, 4316],
[1, 4320],
[1, 4324],
[1, 4328],
[1, 4332],
[1, 4336],
[1, 4340],
[1, 4344],
[1, 4348],
[1, 4352],
[1, 4356],
[1, 4360],
[1, 4364],
[1, 4368],
[1, 4372],
[1, 4376],
[1, 4380],
[1, 4384],
[1, 4388],
[1, 4392],
[1, 4396],
[1, 4400],
[1, 4404],
[1, 4408],
[1, 4412],
[1, 4416],
[1, 4420],
[1, 4424],
[1, 4428],
[1, 4432],
[1, 4436],
[1, 4440],
[1, 4444],
[1, 4448],
[1, 4452],
[1, 4456],
[1, 4460],
[1, 4464],
[1, 4468],
[1, 4472],
[1, 4476],
[1, 4480],
[1, 4484],
[1, 4488],
[1, 4492],
[1, 4496],
[1, 4500],
[1, 4504],
[1, 4508],
[1, 4512],
[1, 4516],
[1, 4520],
[1, 4524],
[1, 4528],
[1, 4532],
[1, 4536],
[1, 4540],
[1, 4544],
[1, 4548],
[1, 4552],
[1, 4556],
[1, 4560],
[1, 4564],
[1, 4568],
[1, 4572],
[1, 4576],
[1, 4580],
[1, 4584],
[1, 4588],
[1, 4592],
[1, 4596],
[1, 4600],
[1, 4604],
[1, 4608],
[1, 4612],
[1, 4616],
[1, 4620],
[1, 4624],
[1, 4628],
[1, 4632],
[1, 4636],
[1, 4640],
[1, 4644],
[1, 4648],
[1, 4652],
[1, 4656],
[1, 4660],
[1, 4664],
[1, 4668],
[1, 4672],
[1, 4676],
[1, 4680],
[1, 4684],
[1, 4688],
[1, 4692],
[1, 4696],
[1, 4700],
[1, 4704],
[1, 4708],
[1, 4712],
[1, 4716],
[1, 4720],
[1, 4724],
[1, 4728],
[1, 4732],
[1, 4736],
[1, 4740],
[1, 4744],
[1, 4748],
[1, 4752],
[1, 4756],
[1, 4760],
[1, 4764],
[1, 4768],
[1, 4772],
[1, 4776],
[1, 4780],
[1, 4784],
[1, 4788],
[1, 4792],
[1, 4796],
[1, 4800],
[1, 4804],
[1, 4808],
[1, 4812],
[1, 4816],
[1, 4820],
[1, 4824],
[1, 4828],
[1, 4832],
[1, 4836],
[1, 4840],
[1, 4844],
[1, 4848],
[1, 4852],
[1, 4856],
[1, 4860],
[1, 4864],
[1, 4868],
[1, 4872],
[1, 4876],
[1, 4880],
[1, 4884],
[1, 4888],
[1, 4892],
[1, 4896],
[1, 4900],
[1, 4904],
[1, 4908],
[1, 4912],
[1, 4916],
[1, 4920],
[1, 4924],
[1, 4928],
[1, 4932],
[1, 4936],
[1, 4940],
[1, 4944],
[1, 4948],
[1, 4952],
[1, 4956],
[1, 4960],
[1, 4964],
[1, 4968],
[1, 4972],
[1, 4976],
[1, 4980],
[1, 4984],
[1, 4988],
[1, 4992],
[1, 4996],
[1, 5000],
[1, 5004],
[1, 5008],
[1, 5012],
[1, 5016],
[1, 5020],
[1, 5024],
[1, 5028],
[1, 5032],
[1, 5036],
[1, 5040],
[1, 5044],
[1, 5048],
[1, 5052],
[1, 5056],
[1, 5060],
[1, 5064],
[1, 5068],
[1, 5072],
[1, 5076],
[1, 5080],
[1, 5084],
[1, 5088],
[1, 5092],
[1, 5096],
[1, 5100],
[1, 5104],
[1, 5108],
[1, 5112],
[1, 5116],
[1, 5120],
[1, 5124],
[1, 5128],
[1, 5132],
[1, 5136],
[1, 5140],
[1, 5144],
[1, 5148],
[1, 5152],
[1, 5156],
[1, 5160],
[1, 5164],
[1, 5168],
[1, 5172],
[1, 5176],
[1, 5180],
[1, 5184],
[1, 5188],
[1, 5192],
[1, 5196],
[1, 5200],
[1, 5204],
[1, 5208],
[1, 5212],
[1, 5216],
[1, 5220],
[1, 5224],
[1, 5228],
[1, 5232],
[1, 5236],
[1, 5240],
[1, 5244],
[1, 5248],
[1, 5252],
[1, 5256],
[1, 5260],
[1, 5264],
[1, 5268],
[1, 5272],
[1, 5276],
[1, 5280],
[1, 5284],
[1, 5288],
[1, 5292],
[1, 5296],
[1, 5300],
[1, 5304],
[1, 5308],
[1, 5312],
[1, 5316],
[1, 5320],
[1, 5324],
[1, 5328],
[1, 5332],
[1, 5336],
[1, 5340],
[1, 5344],
[1, 5348],
[1, 5352],
[1, 5356],
[1, 5360],
[1, 5364],
[1, 5368],
[1, 5372],
[1, 5376],
[1, 5380],
[1, 5384],
[1, 5388],
[1, 5392],
[1, 5396],
[1, 5400],
[1, 5404],
[1, 5408],
[1, 5412],
[1, 5416],
[1, 5420],
[1, 5424],
[1, 5428],
[1, 5432],
[1, 5436],
[1, 5440],
[1, 5444],
[1, 5448],
[1, 5452],
[1, 5456],
[1, 5460],
[1, 5464],
[1, 5468],
[1, 5472],
[1, 5476],
[1, 5480],
[1, 5484],
[1, 5488],
[1, 5492],
[1, 5496],
[1, 5500],
[1, 5504],
[1, 5508],
[1, 5512],
[1, 5516],
[1, 5520],
[1, 5524],
[1, 5528],
[1, 5532],
[1, 5536],
[1, 5540],
[1, 5544],
[1, 5548],
[1, 5552],
[1, 5556],
[1, 5560],
[1, 5564],
[1, 5568],
[1, 5572],
[1, 5576],
[1, 5580],
[1, 5584],
[1, 5588],
[1, 5592],
[1, 5596],
[1, 5600],
[1, 5604],
[1, 5608],
[1, 5612],
[1, 5616],
[1, 5620],
[1, 5624],
[1, 5628],
[1, 5632],
[1, 5636],
[1, 5640],
[1, 5644],
[1, 5648],
[1, 5652],
[1, 5656],
[1, 5660],
[1, 5664],
[1, 5668],
[1, 5672],
[1, 5676],
[1, 5680],
[1, 5684],
[1, 5688],
[1, 5692],
[1, 5696],
[1, 5700],
[1, 5704],
[1, 5708],
[1, 5712],
[1, 5716],
[1, 5720],
[1, 5724],
[1, 5728],
[1, 5732],
[1, 5736],
[1, 5740],
[1, 5744],
[1, 5748],
[1, 5752],
[1, 5756],
[1, 5760],
[1, 5764],
[1, 5768],
[1, 5772],
[1, 5776],
[1, 5780],
[1, 5784],
[1, 5788],
[1, 5792],
[1, 5796],
[1, 5800],
[1, 5804],
[1, 5808],
[1, 5812],
[1, 5816],
[1, 5820],
[1, 5824],
[1, 5828],
[1, 5832],
[1, 5836],
[1, 5840],
[1, 5844],
[1, 5848],
[1, 5852],
[1, 5856],
[1, 5860],
[1, 5864],
[1, 5868],
[1, 5872],
[1, 5876],
[1, 5880],
[1, 5884],
[1, 5888],
[1, 5892],
[1, 5896],
[1, 5900],
[1, 5904],
[1, 5908],
[1, 5912],
[1, 5916],
[1, 5920],
[1, 5924],
[1, 5928],
[1, 5932],
[1, 5936],
[1, 5940],
[1, 5944],
[1, 5948],
[1, 5952],
[1, 5956],
[1, 5960],
[1, 5964],
[1, 5968],
[1, 5972],
[1, 5976],
[1, 5980],
[1, 5984],
[1, 5988],
[1, 5992],
[1, 5996],
[1, 6000],
[1, 6004],
[1, 6008],
[1, 6012],
[1, 6016],
[1, 6020],
[1, 6024],
[1, 6028],
[1, 6032],
[1, 6036],
[1, 6040],
[1, 6044],
[1, 6048],
[1, 6052],
[1, 6056],
[1, 6060],
[1, 6064],
[1, 6068],
[1, 6072],
[1, 6076],
[1, 6080],
[1, 6084],
[1, 6088],
[1, 6092],
[1, 6096],
[1, 6100],
[1, 6104],
[1, 6108],
[1, 6112],
[1, 6116],
[1, 6120],
[1, 6124],
[1, 6128],
[1, 6132],
[1, 6136],
[1, 6140],
[1, 6144],
[1, 6148],
[1, 6152],
[1, 6156],
[1, 6160],
[1, 6164],
[1, 6168],
[1, 6172],
[1, 6176],
[1, 6180],
[1, 6184],
[1, 6188],
[1, 6192],
[1, 6196],
[1, 6200],
[1, 6204],
[1, 6208],
[1, 6212],
[1, 6216],
[1, 6220],
[1, 6224],
[1, 6228],
[1, 6232],
[1, 6236],
[1, 6240],
[1, 6244],
[1, 6248],
[1, 6252],
[1, 6256],
[1, 6260],
[1, 6264],
[1, 6268],
[1, 6272],
[1, 6276],
[1, 6280],
[1, 6284],
[1, 6288],
[1, 6292],
[1, 6296],
[1, 6300],
[1, 6304],
[1, 6308],
[1, 6312],
[1, 6316],
[1, 6320],
[1, 6324],
[1, 6328],
[1, 6332],
[1, 6336],
[1, 6340],
[1, 6344],
[1, 6348],
[1, 6352],
[1, 6356],
[1, 6360],
[1, 6364],
[1, 6368],
[1, 6372],
[1, 6376],
[1, 6380],
[1, 6384],
[1, 6388],
[1, 6392],
[1, 6396],
[1, 6400],
[1, 6404],
[1, 6408],
[1, 6412],
[1, 6416],
[1, 6420],
[1, 6424],
[1, 6428],
[1, 6432],
[1, 6436],
[1, 6440],
[1, 6444],
[1, 6448],
[1, 6452],
[1, 6456],
[1, 6460],
[1, 6464],
[1, 6468],
[1, 6472],
[1, 6476],
[1, 6480],
[1, 6484],
[1, 6488],
[1, 6492],
[1, 6496],
[1, 6500],
[1, 6504],
[1, 6508],
[1, 6512],
[1, 6516],
[1, 6520],
[1, 6524],
[1, 6528],
[1, 6532],
[1, 6536],
[1, 6540],
[1, 6544],
[1, 6548],
[1, 6552],
[1, 6556],
[1, 6560],
[1, 6564],
[1, 6568],
[1, 6572],
[1, 6576],
[1, 6580],
[1, 6584],
[1, 6588],
[1, 6592],
[1, 6596],
[1, 6600],
[1, 6604],
[1, 6608],
[1, 6612],
[1, 6616],
[1, 6620],
[1, 6624],
[1, 6628],
[1, 6632],
[1, 6636],
[1, 6640],
[1, 6644],
[1, 6648],
[1, 6652],
[1, 6656],
[1, 6660],
[1, 6664],
[1, 6668],
[1, 6672],
[1, 6676],
[1, 6680],
[1, 6684],
[1, 6688],
[1, 6692],
[1, 6696],
[1, 6700],
[1, 6704],
[1, 6708],
[1, 6712],
[1, 6716],
[1, 6720],
[1, 6724],
[1, 6728],
[1, 6732],
[1, 6736],
[1, 6740],
[1, 6744],
[1, 6748],
[1, 6752],
[1, 6756],
[1, 6760],
[1, 6764],
[1, 6768],
[1, 6772],
[1, 6776],
[1, 6780],
[1, 6784],
[1, 6788],
[1, 6792],
[1, 6796],
[1, 6800],
[1, 6804],
[1, 6808],
[1, 6812],
[1, 6816],
[1, 6820],
[1, 6824],
[1, 6828],
[1, 6832],
[1, 6836],
[1, 6840],
[1, 6844],
[1, 6848],
[1, 6852],
[1, 6856],
[1, 6860],
[1, 6864],
[1, 6868],
[1, 6872],
[1, 6876],
[1, 6880],
[1, 6884],
[1, 6888],
[1, 6892],
[1, 6896],
[1, 6900],
[1, 6904],
[1, 6908],
[1, 6912],
[1, 6916],
[1, 6920],
[1, 6924],
[1, 6928],
[1, 6932],
[1, 6936],
[1, 6940],
[1, 6944],
[1, 6948],
[1, 6952],
[1, 6956],
[1, 6960],
[1, 6964],
[1, 6968],
[1, 6972],
[1, 6976],
[1, 6980],
[1, 6984],
[1, 6988],
[1, 6992],
[1, 6996],
[1, 7000]]
    return zip(*data)
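
# Usage sketch (an assumption about how these expected_* helpers are consumed;
# the calling tests are not shown in this file): each helper stores
# Mathematica-generated [x, y] rows and returns the two columns unpacked via
# zip(*data), so a test might compare computed values against them like this:
#
#     expected_x, expected_y = expected_wli01_1000()
#     numpy.testing.assert_allclose(computed_y, list(expected_y), rtol=1e-6)
#
# Note that on Python 3, zip() returns an iterator, so wrapping the result in
# list() may be needed before indexing or iterating over the columns twice.
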
def expected_wli01_1000():
    # these data were generated by Mathematica
    data = [
[0.042451002, 0.00E+00],
[0.043658853, 1.372734416],
[0.044879384, 2.746613827],
[0.04611224, 4.121651964],
[0.047357065, 5.497862284],
[0.04861351, 6.875257977],
[0.049881225, 8.253851962],
[0.051159865, 9.633656889],
[0.052449088, 11.01468514],
[0.053748558, 12.39694884],
[0.05505794, 13.78045985],
[0.056376905, 15.16522975],
[0.057705131, 16.55126991],
[0.059042298, 17.9385914],
[0.060388092, 19.32720507],
[0.061742206, 20.7171215],
[0.063104336, 22.10835106],
[0.064474186, 23.50090385],
[0.065851465, 24.89478975],
[0.067235886, 26.29001841],
[0.068627171, 27.68659924],
[0.070025045, 29.08454144],
[0.071429242, 30.48385399],
[0.0728395, 31.88454564],
[0.074255562, 33.28662496],
[0.075677179, 34.69010028],
[0.077104108, 36.09497975],
[0.07853611, 37.50127131],
[0.079972953, 38.90898273],
[0.081414412, 40.31812156],
[0.082860265, 41.72869518],
[0.084310298, 43.14071079],
[0.085764302, 44.55417542],
[0.087222073, 45.9690959],
[0.088683413, 47.38547893],
[0.090148129, 48.80333102],
[0.091616033, 50.22265852],
[0.093086943, 51.64346763],
[0.094560681, 53.06576441],
[0.096037076, 54.48955474],
[0.097515959, 55.91484439],
[0.098997169, 57.34163896],
[0.100480546, 58.76994392],
[0.101965938, 60.19976461],
[0.103453195, 61.63110625],
[0.104942174, 63.06397391],
[0.106432732, 64.49837254],
[0.107924735, 65.93430698],
[0.109418051, 67.37178195],
[0.11091255, 68.81080204],
[0.112408109, 70.25137175],
[0.113904607, 71.69349545],
[0.115401928, 73.13717742],
[0.116899958, 74.58242183],
[0.118398587, 76.02923275],
[0.119897708, 77.47761416],
[0.121397219, 78.92756992],
[0.12289702, 80.37910382],
[0.124397014, 81.83221956],
[0.125897107, 83.28692074],
[0.127397208, 84.74321088],
[0.128897229, 86.20109342],
[0.130397084, 87.66057171],
[0.131896693, 89.12164902],
[0.133395973, 90.58432857],
[0.134894849, 92.04861348],
[0.136393245, 93.51450679],
[0.137891088, 94.98201149],
[0.139388309, 96.45113048],
[0.14088484, 97.92186663],
[0.142380614, 99.3942227],
[0.143875569, 100.8682014],
[0.145369643, 102.3438054],
[0.146862776, 103.8210373],
[0.14835491, 105.2998996],
[0.149845991, 106.7803949],
[0.151335964, 108.2625254],
[0.152824777, 109.7462937],
[0.154312379, 111.2317019],
[0.155798723, 112.7187525],
[0.157283761, 114.2074475],
[0.158767447, 115.6977892],
[0.160249739, 117.1897796],
[0.161730592, 118.6834209],
[0.163209966, 120.1787151],
[0.164687822, 121.6756641],
[0.166164121, 123.1742699],
[0.167638826, 124.6745344],
[0.169111902, 126.1764595],
[0.170583313, 127.6800469],
[0.172053028, 129.1852985],
[0.173521012, 130.692216],
[0.174987236, 132.200801],
[0.17645167, 133.7110554],
[0.177914285, 135.2229806],
[0.179375052, 136.7365783],
[0.180833945, 138.2518501],
[0.182290938, 139.7687975],
[0.183746006, 141.2874221],
[0.185199125, 142.8077252],
[0.186650272, 144.3297083],
[0.188099425, 145.8533729],
[0.189546561, 147.3787203],
[0.190991661, 148.9057519],
[0.192434704, 150.4344691],
[0.193875671, 151.964873],
[0.195314543, 153.4969651],
[0.196751304, 155.0307465],
[0.198185935, 156.5662185],
[0.199618421, 158.1033823],
[0.201048745, 159.6422391],
[0.202476893, 161.18279],
[0.203902851, 162.7250362],
[0.205326603, 164.2689788],
[0.206748137, 165.8146189],
[0.20816744, 167.3619575],
[0.2095845, 168.9109957],
[0.210999305, 170.4617346],
[0.212411843, 172.0141751],
[0.213822105, 173.5683183],
[0.215230079, 175.1241652],
[0.216635757, 176.6817166],
[0.218039128, 178.2409735],
[0.219440184, 179.8019369],
[0.220838916, 181.3646077],
[0.222235316, 182.9289867],
[0.223629377, 184.4950748],
[0.225021091, 186.0628729],
[0.226410452, 187.6323819],
[0.227797452, 189.2036025],
[0.229182086, 190.7765356],
[0.230564348, 192.351182],
[0.231944231, 193.9275424],
[0.233321732, 195.5056176],
[0.234696846, 197.0854084],
[0.236069566, 198.6669156],
[0.23743989, 200.2501398],
[0.238807814, 201.8350818],
[0.240173333, 203.4217423],
[0.241536444, 205.010122],
[0.242897144, 206.6002215],
[0.24425543, 208.1920416],
[0.2456113, 209.7855829],
[0.246964751, 211.3808461],
[0.24831578, 212.9778317],
[0.249664386, 214.5765405],
[0.251010568, 216.176973],
[0.252354323, 217.7791299],
[0.25369565, 219.3830116],
[0.255034548, 220.988619],
[0.256371016, 222.5959524],
[0.257705053, 224.2050125],
[0.259036659, 225.8157999],
[0.260365834, 227.428315],
[0.261692577, 229.0425585],
[0.263016888, 230.6585308],
[0.264338767, 232.2762325],
[0.265658215, 233.8956642],
[0.266975232, 235.5168262],
[0.268289818, 237.1397192],
[0.269601975, 238.7643436],
[0.270911703, 240.3906999],
[0.272219002, 242.0187885],
[0.273523876, 243.64861],
[0.274826323, 245.2801649],
[0.276126347, 246.9134534],
[0.277423948, 248.5484762],
[0.278719128, 250.1852337],
[0.280011889, 251.8237262],
[0.281302233, 253.4639543],
[0.282590161, 255.1059183],
[0.283875676, 256.7496187],
[0.28515878, 258.3950559],
[0.286439475, 260.0422302],
[0.287717763, 261.6911421],
[0.288993647, 263.341792],
[0.29026713, 264.9941802],
[0.291538214, 266.6483072],
[0.292806901, 268.3041733],
[0.294073195, 269.9617789],
[0.295337099, 271.6211243],
[0.296598615, 273.28221],
[0.297857746, 274.9450362],
[0.299114496, 276.6096033],
[0.300368867, 278.2759117],
[0.301620864, 279.9439617],
[0.302870488, 281.6137537],
[0.304117744, 283.2852879],
[0.305362635, 284.9585647],
[0.306605165, 286.6335844],
[0.307845336, 288.3103473],
[0.309083153, 289.9888538],
[0.310318619, 291.6691042],
[0.311551738, 293.3510987],
[0.312782514, 295.0348377],
[0.31401095, 296.7203214],
[0.31523705, 298.4075502],
[0.316460819, 300.0965243],
[0.317682259, 301.7872441],
[0.318901376, 303.4797097],
[0.320118173, 305.1739215],
[0.321332654, 306.8698798],
[0.322544823, 308.5675848],
[0.323754684, 310.2670368],
[0.324962242, 311.9682361],
[0.326167501, 313.6711828],
[0.327370465, 315.3758774],
[0.328571138, 317.0823199],
[0.329769524, 318.7905108],
[0.330965629, 320.5004501],
[0.332159455, 322.2121382],
[0.333351008, 323.9255753],
[0.334540292, 325.6407617],
[0.335727312, 327.3576975],
[0.336912071, 329.076383],
[0.338094575, 330.7968184],
[0.339274827, 332.5190041],
[0.340452833, 334.24294],
[0.341628597, 335.9686266],
[0.342802124, 337.696064],
[0.343973417, 339.4252525],
[0.345142482, 341.1561922],
[0.346309324, 342.8888833],
[0.347473946, 344.6233261],
[0.348636354, 346.3595207],
[0.349796553, 348.0974674],
[0.350954546, 349.8371664],
[0.352110339, 351.5786179],
[0.353263936, 353.321822],
[0.354415342, 355.0667789],
[0.355564562, 356.8134889],
[0.356711601, 358.5619521],
[0.357856463, 360.3121687],
[0.358999153, 362.064139],
[0.360139676, 363.817863],
[0.361278036, 365.5733409],
[0.362414239, 367.330573],
[0.363548289, 369.0895594],
[0.364680192, 370.8503003],
[0.36580995, 372.6127958],
[0.366937571, 374.3770461],
[0.368063058, 376.1430514],
[0.369186416, 377.9108119],
[0.370307651, 379.6803277],
[0.371426766, 381.451599],
[0.372543767, 383.2246259],
[0.373658659, 384.9994086],
[0.374771446, 386.7759473],
[0.375882134, 388.554242],
[0.376990726, 390.334293],
[0.378097229, 392.1161005],
[0.379201647, 393.8996644],
[0.380303984, 395.6849851],
[0.381404246, 397.4720627],
[0.382502437, 399.2608972],
[0.383598563, 401.0514889],
[0.384692628, 402.8438378],
[0.385784636, 404.6379442],
[0.386874594, 406.4338081],
[0.387962505, 408.2314297],
[0.389048375, 410.0308092],
[0.390132208, 411.8319466],
[0.39121401, 413.6348421],
[0.392293784, 415.4394958],
[0.393371536, 417.2459079],
[0.394447271, 419.0540784],
[0.395520994, 420.8640076],
[0.396592709, 422.6756955],
[0.397662421, 424.4891423],
[0.398730135, 426.3043481],
[0.399795856, 428.1213129],
[0.400859589, 429.940037],
[0.401921338, 431.7605204],
[0.402981109, 433.5827633],
[0.404038905, 435.4067658],
[0.405094733, 437.232528],
[0.406148596, 439.0600499],
[0.407200499, 440.8893318],
[0.408250447, 442.7203738],
[0.409298446, 444.5531758],
[0.410344499, 446.3877381],
[0.411388612, 448.2240608],
[0.412430789, 450.062144],
[0.413471034, 451.9019877],
[0.414509354, 453.7435921],
[0.415545752, 455.5869573],
[0.416580233, 457.4320834],
[0.417612802, 459.2789705],
[0.418643463, 461.1276186],
[0.419672222, 462.978028],
[0.420699082, 464.8301986],
[0.421724049, 466.6841307],
[0.422747128, 468.5398242],
[0.423768322, 470.3972793],
[0.424787637, 472.2564961],
[0.425805077, 474.1174747],
[0.426820647, 475.9802151],
[0.427834352, 477.8447175],
[0.428846196, 479.7109819],
[0.429856183, 481.5790085],
[0.430864319, 483.4487973],
[0.431870608, 485.3203485],
[0.432875055, 487.193662],
[0.433877663, 489.068738],
[0.434878438, 490.9455767],
[0.435877385, 492.824178],
[0.436874507, 494.704542],
[0.43786981, 496.5866689],
[0.438863297, 498.4705587],
[0.439854974, 500.3562115],
[0.440844845, 502.2436274],
[0.441832914, 504.1328065],
[0.442819186, 506.0237488],
[0.443803666, 507.9164544],
[0.444786357, 509.8109235],
[0.445767265, 511.707156],
[0.446746394, 513.6051521],
[0.447723747, 515.5049119],
[0.448699331, 517.4064353],
[0.449673148, 519.3097226],
[0.450645204, 521.2147737],
[0.451615503, 523.1215887],
[0.45258405, 525.0301678],
[0.453550847, 526.9405109],
[0.454515901, 528.8526182],
[0.455479216, 530.7664897],
[0.456440795, 532.6821256],
[0.457400643, 534.5995258],
[0.458358765, 536.5186904],
[0.459315164, 538.4396195],
[0.460269845, 540.3623132],
[0.461222813, 542.2867716],
[0.462174072, 544.2129947],
[0.463123625, 546.1409825],
[0.464071478, 548.0707352],
[0.465017634, 550.0022528],
[0.465962097, 551.9355354],
[0.466904873, 553.870583],
[0.467845965, 555.8073957],
[0.468785377, 557.7459736],
[0.469723114, 559.6863167],
[0.470659179, 561.628425],
[0.471593578, 563.5722988],
[0.472526313, 565.5179379],
[0.47345739, 567.4653425],
[0.474386812, 569.4145127],
[0.475314584, 571.3654484],
[0.476240709, 573.3181498],
[0.477165193, 575.2726169],
[0.478088038, 577.2288497],
[0.479009249, 579.1868484],
[0.47992883, 581.146613],
[0.480846786, 583.1081434],
[0.48176312, 585.0714399],
[0.482677836, 587.0365024],
[0.483590938, 589.0033311],
[0.48450243, 590.9719259],
[0.485412317, 592.9422869],
[0.486320603, 594.9144141],
[0.48722729, 596.8883077],
[0.488132385, 598.8639676],
[0.489035889, 600.841394],
[0.489937808, 602.8205868],
[0.490838145, 604.8015462],
[0.491736904, 606.7842721],
[0.492634089, 608.7687647],
[0.493529704, 610.7550239],
[0.494423753, 612.7430499],
[0.49531624, 614.7328427],
[0.496207169, 616.7244022],
[0.497096544, 618.7177287],
[0.497984368, 620.7128221],
[0.498870645, 622.7096824],
[0.499755379, 624.7083097],
[0.500638575, 626.7087042],
[0.501520235, 628.7108657],
[0.502400364, 630.7147944],
[0.503278965, 632.7204903],
[0.504156043, 634.7279534],
[0.505031601, 636.7371838],
[0.505905642, 638.7481816],
[0.506778171, 640.7609467],
[0.507649191, 642.7754793],
[0.508518707, 644.7917794],
[0.509386721, 646.8098469],
[0.510253238, 648.8296821],
[0.51111826, 650.8512848],
[0.511981793, 652.8746551],
[0.512843839, 654.8997932],
[0.513704403, 656.9266989],
[0.514563487, 658.9553725],
[0.515421096, 660.9858138],
[0.516277234, 663.018023],
[0.517131903, 665.0520001],
[0.517985107, 667.0877451],
[0.518836851, 669.1252581],
[0.519687137, 671.1645391],
[0.52053597, 673.2055881],
[0.521383352, 675.2484053],
[0.522229288, 677.2929905],
[0.523073781, 679.3393439],
[0.523916834, 681.3874655],
[0.524758451, 683.4373554],
[0.525598636, 685.4890135],
[0.526437392, 687.54244],
[0.527274723, 689.5976348],
[0.528110632, 691.654598],
[0.528945122, 693.7133296],
[0.529778198, 695.7738296],
[0.530609862, 697.8360982],
[0.531440118, 699.9001353],
[0.532268969, 701.9659409],
[0.53309642, 704.0335152],
[0.533922472, 706.1028581],
[0.534747131, 708.1739697],
[0.535570398, 710.2468499],
[0.536392279, 712.321499],
[0.537212775, 714.3979167],
[0.53803189, 716.4761033],
[0.538849628, 718.5560587],
[0.539665992, 720.637783],
[0.540480985, 722.7212762],
[0.541294611, 724.8065384],
[0.542106874, 726.8935695],
[0.542917775, 728.9823696],
[0.543727319, 731.0729387],
[0.544535509, 733.1652769],
[0.545342349, 735.2593842],
[0.54614784, 737.3552606],
[0.546951988, 739.4529062],
[0.547754795, 741.552321],
[0.548556264, 743.6535049],
[0.549356398, 745.7564582],
[0.550155202, 747.8611807],
[0.550952677, 749.9676725],
[0.551748828, 752.0759336],
[0.552543657, 754.1859641],
[0.553337167, 756.297764],
[0.554129363, 758.4113334],
[0.554920246, 760.5266722],
[0.55570982, 762.6437804],
[0.556498089, 764.7626582],
[0.557285055, 766.8833055],
[0.558070722, 769.0057224],
[0.558855092, 771.1299088],
[0.559638169, 773.2558649],
[0.560419956, 775.3835906],
[0.561200456, 777.513086],
[0.561979673, 779.6443511],
[0.562757608, 781.7773859],
[0.563534266, 783.9121905],
[0.564309648, 786.0487649],
[0.56508376, 788.187109],
[0.565856602, 790.327223],
[0.56662818, 792.4691069],
[0.567398494, 794.6127606],
[0.568167549, 796.7581842],
[0.568935348, 798.9053778],
[0.569701893, 801.0543413],
[0.570467188, 803.2050748],
[0.571231235, 805.3575783],
[0.571994037, 807.5118519],
[0.572755598, 809.6678955],
[0.57351592, 811.8257092],
[0.574275007, 813.9852929],
[0.57503286, 816.1466468],
[0.575789484, 818.3097709],
[0.576544881, 820.4746651],
[0.577299055, 822.6413295],
[0.578052006, 824.8097642],
[0.57880374, 826.9799691],
[0.579554259, 829.1519442],
[0.580303565, 831.3256897],
[0.581051661, 833.5012054],
[0.581798551, 835.6784915],
[0.582544237, 837.8575479],
[0.583288721, 840.0383747],
[0.584032008, 842.2209719],
[0.584774099, 844.4053396],
[0.585514998, 846.5914776],
[0.586254707, 848.7793861],
[0.586993229, 850.9690652],
[0.587730567, 853.1605147],
[0.588466724, 855.3537347],
[0.589201702, 857.5487253],
[0.589935504, 859.7454864],
[0.590668133, 861.9440182],
[0.591399592, 864.1443205],
[0.592129883, 866.3463935],
[0.59285901, 868.5502371],
[0.593586974, 870.7558514],
[0.594313779, 872.9632364],
[0.595039428, 875.172392],
[0.595763922, 877.3833184],
[0.596487265, 879.5960156],
[0.59720946, 881.8104835],
[0.597930508, 884.0267222],
[0.598650414, 886.2447317],
[0.599369179, 888.464512],
[0.600086806, 890.6860631],
[0.600803297, 892.9093852],
[0.601518656, 895.134478],
[0.602232885, 897.3613418],
[0.602945987, 899.5899765],
[0.603657964, 901.8203821],
[0.604368819, 904.0525587],
[0.605078554, 906.2865062],
[0.605787172, 908.5222247],
[0.606494676, 910.7597143],
[0.607201068, 912.9989748],
[0.607906351, 915.2400064],
[0.608610527, 917.482809],
[0.609313599, 919.7273827],
[0.61001557, 921.9737275],
[0.610716442, 924.2218433],
[0.611416217, 926.4717303],
[0.612114898, 928.7233885],
[0.612812487, 930.9768178],
[0.613508988, 933.2320182],
[0.614204403, 935.4889898],
[0.614898733, 937.7477327],
[0.615591982, 940.0082467],
[0.616284152, 942.270532],
[0.616975245, 944.5345886],
[0.617665265, 946.8004164],
[0.618354213, 949.0680154],
[0.619042092, 951.3373858],
[0.619728904, 953.6085275],
[0.620414652, 955.8814405],
[0.621099338, 958.1561249],
[0.621782964, 960.4325806],
[0.622465534, 962.7108076],
[0.623147049, 964.9908061],
[0.623827512, 967.2725759],
[0.624506926, 969.5561172],
[0.625185292, 971.8414299],
[0.625862613, 974.128514],
[0.626538891, 976.4173696],
[0.627214129, 978.7079967],
[0.627888329, 981.0003953],
[0.628561494, 983.2945653],
[0.629233625, 985.5905069],
[0.629904726, 987.88822],
[0.630574798, 990.1877046],
[0.631243844, 992.4889608],
[0.631911866, 994.7919885],
[0.632578866, 997.0967878],
[0.633244848, 999.4033587],
[0.633909812, 1001.711701],
[0.634573761, 1004.021815],
[0.635236699, 1006.333701],
[0.635898626, 1008.647359],
[0.636559545, 1010.962788],
[0.637219459, 1013.279988],
[0.637878369, 1015.598961],
[0.638536279, 1017.919705],
[0.639193189, 1020.242221],
[0.639849103, 1022.566508],
[0.640504023, 1024.892568],
[0.64115795, 1027.220399],
[0.641810888, 1029.550001],
[0.642462838, 1031.881376],
[0.643113803, 1034.214522],
[0.643763784, 1036.54944],
[0.644412784, 1038.88613],
[0.645060805, 1041.224592],
[0.64570785, 1043.564826],
[0.64635392, 1045.906831],
[0.646999018, 1048.250608],
[0.647643145, 1050.596158],
[0.648286305, 1052.943479],
[0.648928498, 1055.292571],
[0.649569728, 1057.643436],
[0.650209996, 1059.996073],
[0.650849305, 1062.350481],
[0.651487657, 1064.706662],
[0.652125053, 1067.064614],
[0.652761496, 1069.424339],
[0.653396988, 1071.785835],
[0.654031531, 1074.149103],
[0.654665128, 1076.514144],
[0.655297779, 1078.880956],
[0.655929488, 1081.24954],
[0.656560257, 1083.619897],
[0.657190086, 1085.992025],
[0.65781898, 1088.365925],
[0.658446939, 1090.741598],
[0.659073966, 1093.119042],
[0.659700062, 1095.498259],
[0.660325231, 1097.879247],
[0.660949473, 1100.262008],
[0.661572791, 1102.64654],
[0.662195187, 1105.032845],
[0.662816663, 1107.420922],
[0.66343722, 1109.810771],
[0.664056862, 1112.202392],
[0.664675589, 1114.595786],
[0.665293405, 1116.990951],
[0.66591031, 1119.387889],
[0.666526307, 1121.786598],
[0.667141398, 1124.18708],
[0.667755585, 1126.589334],
[0.668368869, 1128.993361],
[0.668981254, 1131.399159],
[0.66959274, 1133.80673],
[0.67020333, 1136.216073],
[0.670813025, 1138.627188],
[0.671421828, 1141.040075],
[0.67202974, 1143.454735],
[0.672636764, 1145.871167],
[0.673242901, 1148.289371],
[0.673848153, 1150.709347],
[0.674452523, 1153.131096],
[0.675056012, 1155.554617],
[0.675658621, 1157.97991],
[0.676260354, 1160.406975],
[0.676861211, 1162.835813],
[0.677461195, 1165.266423],
[0.678060307, 1167.698805],
[0.67865855, 1170.13296],
[0.679255925, 1172.568887],
[0.679852434, 1175.006587],
[0.68044808, 1177.446058],
[0.681042863, 1179.887302],
[0.681636785, 1182.330319],
[0.68222985, 1184.775108],
[0.682822057, 1187.221669],
[0.68341341, 1189.670002],
[0.68400391, 1192.120108],
[0.684593559, 1194.571987],
[0.685182359, 1197.025637],
[0.685770311, 1199.48106],
[0.686357417, 1201.938256],
[0.686943679, 1204.397224],
[0.6875291, 1206.857964],
[0.68811368, 1209.320477],
[0.688697421, 1211.784763],
[0.689280326, 1214.25082],
[0.689862396, 1216.71865],
[0.690443632, 1219.188253],
[0.691024038, 1221.659628],
[0.691603613, 1224.132776],
[0.692182361, 1226.607696],
[0.692760282, 1229.084388],
[0.693337379, 1231.562853],
[0.693913654, 1234.043091],
[0.694489107, 1236.525101],
[0.695063742, 1239.008883],
[0.695637559, 1241.494439],
[0.69621056, 1243.981766],
[0.696782747, 1246.470866],
[0.697354122, 1248.961739],
[0.697924686, 1251.454384],
[0.698494442, 1253.948802],
[0.69906339, 1256.444992],
[0.699631533, 1258.942955],
[0.700198872, 1261.44269],
[0.700765408, 1263.944198],
[0.701331145, 1266.447478],
[0.701896083, 1268.952531],
[0.702460223, 1271.459357],
[0.703023568, 1273.967955],
[0.70358612, 1276.478326],
[0.704147879, 1278.990469],
[0.704708848, 1281.504385],
[0.705269028, 1284.020074],
[0.705828421, 1286.537535],
[0.706387029, 1289.056769],
[0.706944852, 1291.577775],
[0.707501894, 1294.100554],
[0.708058155, 1296.625106],
[0.708613637, 1299.15143],
[0.709168341, 1301.679527],
[0.70972227, 1304.209397],
[0.710275424, 1306.741039],
[0.710827806, 1309.274454],
[0.711379418, 1311.809641],
[0.711930259, 1314.346601],
[0.712480333, 1316.885334],
[0.713029641, 1319.42584],
[0.713578184, 1321.968118],
[0.714125965, 1324.512169],
[0.714672984, 1327.057992],
[0.715219243, 1329.605588],
[0.715764743, 1332.154957],
[0.716309487, 1334.706098],
[0.716853476, 1337.259013],
[0.717396711, 1339.8137],
[0.717939194, 1342.370159],
[0.718480927, 1344.928391],
[0.719021911, 1347.488396],
[0.719562147, 1350.050174],
[0.720101637, 1352.613724],
[0.720640383, 1355.179047],
[0.721178386, 1357.746143],
[0.721715647, 1360.315012],
[0.722252169, 1362.885653],
[0.722787953, 1365.458067],
[0.723322999, 1368.032253],
[0.723857311, 1370.608213],
[0.724390888, 1373.185945],
[0.724923733, 1375.76545],
[0.725455848, 1378.346727],
[0.725987233, 1380.929778],
[0.72651789, 1383.514601],
[0.727047821, 1386.101197],
[0.727577027, 1388.689565],
[0.728105509, 1391.279707],
[0.72863327, 1393.871621],
[0.72916031, 1396.465307],
[0.729686631, 1399.060767],
[0.730212234, 1401.657999],
[0.730737122, 1404.257004],
[0.731261294, 1406.857782],
[0.731784754, 1409.460333],
[0.732307501, 1412.064656],
[0.732829538, 1414.670753],
[0.733350867, 1417.278621],
[0.733871487, 1419.888263],
[0.734391402, 1422.499678],
[0.734910612, 1425.112865],
[0.735429119, 1427.727825],
[0.735946924, 1430.344558],
[0.736464028, 1432.963063],
[0.736980434, 1435.583342],
[0.737496142, 1438.205393],
[0.738011153, 1440.829217],
[0.73852547, 1443.454813],
[0.739039094, 1446.082183],
[0.739552025, 1448.711325],
[0.740064266, 1451.34224],
[0.740575817, 1453.974928],
[0.741086681, 1456.609389],
[0.741596858, 1459.245622],
[0.742106349, 1461.883629],
[0.742615157, 1464.523408],
[0.743123283, 1467.16496],
[0.743630728, 1469.808284],
[0.744137492, 1472.453382],
[0.744643579, 1475.100252],
[0.745148988, 1477.748895],
[0.745653722, 1480.399311],
[0.746157781, 1483.0515],
[0.746661168, 1485.705461],
[0.747163882, 1488.361195],
[0.747665927, 1491.018703],
[0.748167302, 1493.677982],
[0.748668009, 1496.339035],
[0.749168051, 1499.001861],
[0.749667427, 1501.666459],
[0.750166139, 1504.33283],
[0.750664189, 1507.000974],
[0.751161578, 1509.670891],
[0.751658307, 1512.34258],
[0.752154378, 1515.016043],
[0.752649791, 1517.691278],
[0.753144548, 1520.368286],
[0.753638651, 1523.047066],
[0.7541321, 1525.72762],
[0.754624897, 1528.409946],
[0.755117043, 1531.094045],
[0.75560854, 1533.779917],
[0.756099389, 1536.467562],
[0.75658959, 1539.15698],
[0.757079146, 1541.84817],
[0.757568057, 1544.541133],
[0.758056325, 1547.235869],
[0.758543951, 1549.932378],
[0.759030936, 1552.63066],
[0.759517282, 1555.330714],
[0.76000299, 1558.032542],
[0.760488061, 1560.736142],
[0.760972496, 1563.441514],
[0.761456296, 1566.14866],
[0.761939463, 1568.857578],
[0.762421998, 1571.56827],
[0.762903903, 1574.280734],
[0.763385177, 1576.994971],
[0.763865824, 1579.71098],
[0.764345843, 1582.428763],
[0.764825236, 1585.148318],
[0.765304005, 1587.869646],
[0.76578215, 1590.592746],
[0.766259672, 1593.31762],
[0.766736574, 1596.044266],
[0.767212855, 1598.772685],
[0.767688518, 1601.502877],
[0.768163563, 1604.234842],
[0.768637992, 1606.96858],
[0.769111806, 1609.70409],
[0.769585006, 1612.441373],
[0.770057593, 1615.180429],
[0.770529568, 1617.921257],
[0.771000933, 1620.663859],
[0.771471689, 1623.408233],
[0.771941836, 1626.15438],
[0.772411377, 1628.902299],
[0.772880311, 1631.651992],
[0.773348642, 1634.403457],
[0.773816368, 1637.156695],
[0.774283492, 1639.911705],
[0.774750015, 1642.668489],
[0.775215938, 1645.427045],
[0.775681263, 1648.187374],
[0.776145989, 1650.949476],
[0.776610119, 1653.71335],
[0.777073653, 1656.478997],
[0.777536593, 1659.246417],
[0.77799894, 1662.01561],
[0.778460694, 1664.786575],
[0.778921858, 1667.559314],
[0.779382432, 1670.333824],
[0.779842417, 1673.110108],
[0.780301814, 1675.888164],
[0.780760625, 1678.667993],
[0.781218851, 1681.449595],
[0.781676492, 1684.23297],
[0.78213355, 1687.018117],
[0.782590026, 1689.805037],
[0.783045921, 1692.593729],
[0.783501236, 1695.384195],
[0.783955972, 1698.176433],
[0.784410131, 1700.970443],
[0.784863713, 1703.766227],
[0.785316719, 1706.563783],
[0.785769151, 1709.363111],
[0.786221009, 1712.164213],
[0.786672296, 1714.967087],
[0.787123011, 1717.771734],
[0.787573155, 1720.578153],
[0.788022731, 1723.386345],
[0.788471739, 1726.19631],
[0.788920179, 1729.008047],
[0.789368054, 1731.821558],
[0.789815364, 1734.63684],
[0.79026211, 1737.453896],
[0.790708293, 1740.272724],
[0.791153915, 1743.093324],
[0.791598976, 1745.915698],
[0.792043477, 1748.739844],
[0.79248742, 1751.565762],
[0.792930805, 1754.393453],
[0.793373634, 1757.222917],
[0.793815907, 1760.054153],
[0.794257626, 1762.887162],
[0.794698791, 1765.721944],
[0.795139404, 1768.558498],
[0.795579466, 1771.396825],
[0.796018977, 1774.236924],
[0.796457939, 1777.078796],
[0.796896353, 1779.922441],
[0.797334219, 1782.767858],
[0.797771539, 1785.615048],
[0.798208314, 1788.46401],
[0.798644544, 1791.314745],
[0.799080232, 1794.167252],
[0.799515376, 1797.021532],
[0.79994998, 1799.877585],
[0.800384043, 1802.735409],
[0.800817568, 1805.595007],
[0.801250553, 1808.456377],
[0.801683002, 1811.31952],
[0.802114914, 1814.184435],
[0.802546291, 1817.051122],
[0.802977133, 1819.919582],
[0.803407442, 1822.789815],
[0.803837218, 1825.66182],
[0.804266464, 1828.535597],
[0.804695178, 1831.411147],
[0.805123363, 1834.28847],
[0.80555102, 1837.167565],
[0.805978149, 1840.048432],
[0.806404751, 1842.931072],
[0.806830828, 1845.815485],
[0.80725638, 1848.701669],
[0.807681408, 1851.589626],
[0.808105914, 1854.479356],
[0.808529897, 1857.370858],
[0.80895336, 1860.264133],
[0.809376303, 1863.159179],
[0.809798726, 1866.055999],
[0.810220632, 1868.95459],
[0.810642021, 1871.854954],
[0.811062893, 1874.757091],
[0.81148325, 1877.661],
[0.811903093, 1880.566681],
[0.812322422, 1883.474134],
[0.812741238, 1886.38336],
[0.813159544, 1889.294358],
[0.813577338, 1892.207129],
[0.813994623, 1895.121672],
[0.814411399, 1898.037987],
[0.814827667, 1900.956075],
[0.815243428, 1903.875934],
[0.815658683, 1906.797567],
[0.816073433, 1909.720971],
[0.816487678, 1912.646148],
[0.81690142, 1915.573097],
[0.81731466, 1918.501818],
[0.817727398, 1921.432311],
[0.818139636, 1924.364577],
[0.818551373, 1927.298615],
[0.818962612, 1930.234425],
[0.819373353, 1933.172008],
[0.819783597, 1936.111362],
[0.820193344, 1939.052489],
[0.820602596, 1941.995388],
[0.821011354, 1944.94006],
[0.821419618, 1947.886503],
[0.821827389, 1950.834719],
[0.822234669, 1953.784706],
[0.822641457, 1956.736466],
[0.823047756, 1959.689998],
[0.823453565, 1962.645303],
[0.823858886, 1965.602379],
[0.824263719, 1968.561227],
[0.824668066, 1971.521848],
[0.825071927, 1974.484241],
[0.825475303, 1977.448405],
[0.825878195, 1980.414342],
[0.826280604, 1983.382051],
[0.82668253, 1986.351532],
[0.827083975, 1989.322785],
[0.827484939, 1992.29581],
[0.827885424, 1995.270607],
[0.828285429, 1998.247177],
[0.828684957, 2001.225518],
[0.829084007, 2004.205631],
[0.82948258, 2007.187516],
[0.829880678, 2010.171173],
[0.830278302, 2013.156602],
[0.830675451, 2016.143803],
[0.831072127, 2019.132776],
[0.831468331, 2022.123521],
[0.831864063, 2025.116038],
[0.832259324, 2028.110327],
[0.832654116, 2031.106388],
[0.833048439, 2034.10422],
[0.833442293, 2037.103825],
[0.833835681, 2040.105201],
[0.834228601, 2043.108349],
[0.834621056, 2046.113269],
[0.835013046, 2049.119961],
[0.835404571, 2052.128425],
[0.835795634, 2055.13866],
[0.836186234, 2058.150668],
[0.836576372, 2061.164447],
[0.836966049, 2064.179998],
[0.837355265, 2067.19732],
[0.837744023, 2070.216415],
[0.838132322, 2073.237281],
[0.838520163, 2076.259919],
[0.838907547, 2079.284328],
[0.839294475, 2082.31051],
[0.839680948, 2085.338463],
[0.840066966, 2088.368187],
[0.84045253, 2091.399684],
[0.840837641, 2094.432952],
[0.8412223, 2097.467991],
[0.841606507, 2100.504803],
[0.841990263, 2103.543386],
[0.84237357, 2106.58374],
[0.842756427, 2109.625866],
[0.843138836, 2112.669764],
[0.843520797, 2115.715433],
[0.843902311, 2118.762874],
[0.844283379, 2121.812086],
[0.844664002, 2124.86307],
[0.84504418, 2127.915825],
[0.845423914, 2130.970352],
[0.845803205, 2134.02665],
[0.846182053, 2137.08472],
[0.846560461, 2140.144561],
[0.846938427, 2143.206174],
[0.847315953, 2146.269558],
[0.847693039, 2149.334714],
[0.848069687, 2152.401641],
[0.848445897, 2155.470339],
[0.84882167, 2158.540808],
[0.849197007, 2161.613049],
[0.849571908, 2164.687062],
[0.849946374, 2167.762845],
[0.850320405, 2170.8404],
[0.850694004, 2173.919726],
[0.851067169, 2177.000824],
[0.851439903, 2180.083692],
[0.851812205, 2183.168332],
[0.852184076, 2186.254743],
[0.852555518, 2189.342926],
[0.852926531, 2192.432879],
[0.853297115, 2195.524604],
[0.853667271, 2198.6181],
[0.854037001, 2201.713367],
[0.854406304, 2204.810405],
[0.854775182, 2207.909214],
[0.855143634, 2211.009795],
[0.855511663, 2214.112146],
[0.855879268, 2217.216268],
[0.856246451, 2220.322162],
[0.856613211, 2223.429826],
[0.85697955, 2226.539262],
[0.857345469, 2229.650468],
[0.857710967, 2232.763446],
[0.858076046, 2235.878194],
[0.858440707, 2238.994714],
[0.85880495, 2242.113004],
[0.859168776, 2245.233065],
[0.859532185, 2248.354897],
[0.859895178, 2251.4785],
[0.860257757, 2254.603873],
[0.860619921, 2257.731018],
[0.860981671, 2260.859933],
[0.861343008, 2263.990619],
[0.861703933, 2267.123076],
[0.862064446, 2270.257304],
[0.862424548, 2273.393302],
[0.862784239, 2276.531071],
[0.863143521, 2279.67061],
[0.863502394, 2282.811921],
[0.863860859, 2285.955002],
[0.864218916, 2289.099853],
[0.864576566, 2292.246475],
[0.864933809, 2295.394868],
[0.865290647, 2298.545032],
[0.86564708, 2301.696965],
[0.866003108, 2304.85067],
[0.866358733, 2308.006145],
[0.866713955, 2311.16339],
[0.867068774, 2314.322406],
[0.867423192, 2317.483192],
[0.867777208, 2320.645748],
[0.868130824, 2323.810075],
[0.86848404, 2326.976173],
[0.868836857, 2330.14404],
[0.869189276, 2333.313678],
[0.869541296, 2336.485087],
[0.86989292, 2339.658265],
[0.870244146, 2342.833214],
[0.870594977, 2346.009933],
[0.870945413, 2349.188422],
[0.871295453, 2352.368682],
[0.8716451, 2355.550711],
[0.871994353, 2358.734511],
[0.872343214, 2361.920081],
[0.872691682, 2365.107421],
[0.873039759, 2368.296531],
[0.873387445, 2371.48741],
[0.87373474, 2374.68006],
[0.874081646, 2377.87448],
[0.874428163, 2381.07067],
[0.874774291, 2384.26863],
[0.875120031, 2387.46836],
[0.875465385, 2390.669859],
[0.875810351, 2393.873129],
[0.876154932, 2397.078168],
[0.876499127, 2400.284977],
[0.876842938, 2403.493556],
[0.877186364, 2406.703905],
[0.877529407, 2409.916023],
[0.877872066, 2413.129911],
[0.878214344, 2416.345569],
[0.87855624, 2419.562996],
[0.878897754, 2422.782193],
[0.879238888, 2426.00316],
[0.879579642, 2429.225896],
[0.879920016, 2432.450401],
[0.880260012, 2435.676676],
[0.88059963, 2438.904721],
[0.88093887, 2442.134535],
[0.881277733, 2445.366118],
[0.881616219, 2448.599471],
[0.88195433, 2451.834593],
[0.882292065, 2455.071485],
[0.882629426, 2458.310146],
[0.882966413, 2461.550576],
[0.883303026, 2464.792775],
[0.883639266, 2468.036743],
[0.883975134, 2471.282481],
[0.88431063, 2474.529988],
[0.884645755, 2477.779264],
[0.884980509, 2481.030309],
[0.885314893, 2484.283122],
[0.885648907, 2487.537705],
[0.885982552, 2490.794057],
[0.88631583, 2494.052178],
[0.886648739, 2497.312068],
[0.886981281, 2500.573727],
[0.887313456, 2503.837154],
[0.887645265, 2507.102351],
[0.887976709, 2510.369316],
[0.888307787, 2513.63805],
[0.888638501, 2516.908552],
[0.888968851, 2520.180823],
[0.889298838, 2523.454863],
[0.889628462, 2526.730672],
[0.889957723, 2530.008249],
[0.890286623, 2533.287594],
[0.890615161, 2536.568708],
[0.890943339, 2539.851591],
[0.891271157, 2543.136242],
[0.891598615, 2546.422661],
[0.891925714, 2549.710849],
[0.892252455, 2553.000805],
[0.892578837, 2556.292529],
[0.892904862, 2559.586022],
[0.893230531, 2562.881282],
[0.893555842, 2566.178311],
[0.893880798, 2569.477108],
[0.894205399, 2572.777673],
[0.894529645, 2576.080006],
[0.894853536, 2579.384107],
[0.895177074, 2582.689976],
[0.895500259, 2585.997613],
[0.89582309, 2589.307018],
[0.89614557, 2592.61819],
[0.896467698, 2595.931131],
[0.896789475, 2599.245839],
[0.897110901, 2602.562315],
[0.897431976, 2605.880558],
[0.897752703, 2609.200569],
[0.89807308, 2612.522348],
[0.898393108, 2615.845894],
[0.898712788, 2619.171208],
[0.899032121, 2622.498289],
[0.899351107, 2625.827138],
[0.899669745, 2629.157754],
[0.899988038, 2632.490137],
[0.900305985, 2635.824288],
[0.900623587, 2639.160205],
[0.900940845, 2642.49789],
[0.901257758, 2645.837342],
[0.901574327, 2649.178561],
[0.901890554, 2652.521547],
[0.902206437, 2655.8663],
[0.902521979, 2659.21282],
[0.902837178, 2662.561107],
[0.903152037, 2665.911161],
[0.903466554, 2669.262982],
[0.903780732, 2672.616569],
[0.904094569, 2675.971923],
[0.904408067, 2679.329043],
[0.904721227, 2682.68793],
[0.905034048, 2686.048584],
[0.905346531, 2689.411004],
[0.905658676, 2692.775191],
[0.905970485, 2696.141144],
[0.906281957, 2699.508863],
[0.906593093, 2702.878348],
[0.906903893, 2706.2496],
[0.907214358, 2709.622618],
[0.907524489, 2712.997402],
[0.907834285, 2716.373952],
[0.908143748, 2719.752268],
[0.908452877, 2723.13235],
[0.908761674, 2726.514197],
[0.909070137, 2729.897811],
[0.909378269, 2733.28319],
[0.90968607, 2736.670335],
[0.909993539, 2740.059246],
[0.910300678, 2743.449922],
[0.910607486, 2746.842363],
[0.910913965, 2750.236571],
[0.911220115, 2753.632543],
[0.911525935, 2757.030281],
[0.911831427, 2760.429784],
[0.912136591, 2763.831052],
[0.912441428, 2767.234086],
[0.912745937, 2770.638884],
[0.91305012, 2774.045448],
[0.913353976, 2777.453776],
[0.913657506, 2780.863869],
[0.913960711, 2784.275727],
[0.914263591, 2787.68935],
[0.914566146, 2791.104738],
[0.914868377, 2794.52189],
[0.915170285, 2797.940807],
[0.915471868, 2801.361488],
[0.915773129, 2804.783933],
[0.916074067, 2808.208143],
[0.916374683, 2811.634117],
[0.916674978, 2815.061855],
[0.916974951, 2818.491357],
[0.917274603, 2821.922624],
[0.917573934, 2825.355654],
[0.917872945, 2828.790448],
[0.918171637, 2832.227006],
[0.918470009, 2835.665328],
[0.918768062, 2839.105413],
[0.919065796, 2842.547262],
[0.919363212, 2845.990875],
[0.919660311, 2849.43625],
[0.919957092, 2852.88339],
[0.920253556, 2856.332292],
[0.920549703, 2859.782958],
[0.920845533, 2863.235386],
[0.921141048, 2866.689578],
[0.921436248, 2870.145532],
[0.921731132, 2873.60325],
[0.922025701, 2877.06273],
[0.922319955, 2880.523973],
[0.922613896, 2883.986978],
[0.922907523, 2887.451746],
[0.923200836, 2890.918276],
[0.923493837, 2894.386569],
[0.923786524, 2897.856623],
[0.9240789, 2901.32844],
[0.924370963, 2904.802019],
[0.924662715, 2908.27736],
[0.924954155, 2911.754463],
[0.925245284, 2915.233327],
[0.925536103, 2918.713953],
[0.925826612, 2922.196341],
[0.92611681, 2925.68049],
[0.926406699, 2929.1664],
[0.926696279, 2932.654072],
[0.926985549, 2936.143505],
[0.927274511, 2939.634699],
[0.927563165, 2943.127654],
[0.927851511, 2946.622369],
[0.928139549, 2950.118846],
[0.92842728, 2953.617083],
[0.928714703, 2957.11708],
[0.92900182, 2960.618838],
[0.92928863, 2964.122356],
[0.929575135, 2967.627635],
[0.929861333, 2971.134673],
[0.930147226, 2974.643472],
[0.930432814, 2978.15403],
[0.930718096, 2981.666348],
[0.931003074, 2985.180426],
[0.931287748, 2988.696263],
[0.931572117, 2992.21386],
[0.931856183, 2995.733216],
[0.932139945, 2999.254331],
[0.932423404, 3002.777205],
[0.93270656, 3006.301838],
[0.932989413, 3009.828229],
[0.933271964, 3013.35638],
[0.933554213, 3016.886288],
[0.933836159, 3020.417956],
[0.934117804, 3023.951381],
[0.934399148, 3027.486565],
[0.93468019, 3031.023506],
[0.934960931, 3034.562205],
[0.935241372, 3038.102663],
[0.935521512, 3041.644877],
[0.935801352, 3045.188849],
[0.936080892, 3048.734579],
[0.936360133, 3052.282065],
[0.936639073, 3055.831309],
[0.936917715, 3059.382309],
[0.937196057, 3062.935066],
[0.9374741, 3066.48958],
[0.937751845, 3070.04585],
[0.938029291, 3073.603876],
[0.938306439, 3077.163658],
[0.938583289, 3080.725197],
[0.938859841, 3084.288491],
[0.939136096, 3087.853541],
[0.939412053, 3091.420346],
[0.939687712, 3094.988906],
[0.939963075, 3098.559222],
[0.94023814, 3102.131292],
[0.940512909, 3105.705118],
[0.940787381, 3109.280698],
[0.941061556, 3112.858032],
[0.941335436, 3116.437121],
[0.941609019, 3120.017963],
[0.941882306, 3123.60056],
[0.942155297, 3127.18491],
[0.942427992, 3130.771014],
[0.942700392, 3134.358872],
[0.942972496, 3137.948482],
[0.943244305, 3141.539846],
[0.943515819, 3145.132962],
[0.943787037, 3148.727831],
[0.944057961, 3152.324452],
[0.944328589, 3155.922825],
[0.944598923, 3159.522951],
[0.944868962, 3163.124828],
[0.945138706, 3166.728456],
[0.945408155, 3170.333837],
[0.945677311, 3173.940968],
[0.945946171, 3177.54985],
[0.946214737, 3181.160483],
[0.946483009, 3184.772866],
[0.946750987, 3188.387],
[0.94701867, 3192.002884],
[0.947286059, 3195.620517],
[0.947553154, 3199.2399],
[0.947819955, 3202.861033],
[0.948086461, 3206.483914],
[0.948352674, 3210.108545],
[0.948618592, 3213.734924],
[0.948884216, 3217.363051],
[0.949149546, 3220.992926],
[0.949414582, 3224.62455],
[0.949679323, 3228.257921],
[0.94994377, 3231.893039],
[0.950207923, 3235.529904],
[0.950471781, 3239.168517],
[0.950735345, 3242.808875],
[0.950998615, 3246.45098],
[0.95126159, 3250.094831],
[0.95152427, 3253.740428],
[0.951786656, 3257.38777],
[0.952048746, 3261.036858],
[0.952310542, 3264.68769],
[0.952572043, 3268.340266],
[0.952833248, 3271.994587],
[0.953094158, 3275.650652],
[0.953354772, 3279.30846],
[0.953615091, 3282.968012],
[0.953875114, 3286.629307],
[0.954134841, 3290.292344],
[0.954394271, 3293.957123],
[0.954653405, 3297.623645],
[0.954912243, 3301.291908],
[0.955170783, 3304.961913],
[0.955429027, 3308.633658],
[0.955686973, 3312.307144],
[0.955944621, 3315.982371],
[0.956201972, 3319.659337],
[0.956459024, 3323.338043],
[0.956715778, 3327.018488],
[0.956972233, 3330.700671],
[0.957228388, 3334.384593],
[0.957484245, 3338.070253],
[0.957739801, 3341.757651],
[0.957995057, 3345.446786],
[0.958250013, 3349.137657],
[0.958504667, 3352.830265],
[0.958759021, 3356.524609],
[0.959013072, 3360.220689],
[0.959266821, 3363.918503],
[0.959520268, 3367.618052],
[0.959773411, 3371.319335],
[0.96002625, 3375.022352],
[0.960278785, 3378.727103],
[0.960531016, 3382.433586],
[0.960782941, 3386.141801],
[0.96103456, 3389.851749],
[0.961285873, 3393.563427],
[0.961536878, 3397.276837],
[0.961787576, 3400.991977],
[0.962037966, 3404.708847],
[0.962288047, 3408.427446],
[0.962537818, 3412.147773],
[0.962787278, 3415.869829],
[0.963036428, 3419.593613],
[0.963285265, 3423.319124],
[0.96353379, 3427.046362],
[0.963782002, 3430.775325],
[0.964029899, 3434.506014],
[0.964277481, 3438.238428],
[0.964524747, 3441.972566],
[0.964771696, 3445.708427],
[0.965018327, 3449.446012],
[0.965264639, 3453.185319],
[0.965510632, 3456.926347],
[0.965756303, 3460.669097],
[0.966001653, 3464.413567],
[0.96624668, 3468.159757],
[0.966491383, 3471.907665],
[0.966735761, 3475.657292],
[0.966979812, 3479.408637],
[0.967223536, 3483.161698],
[0.96746693, 3486.916476],
[0.967709995, 3490.672969],
[0.967952729, 3494.431176],
[0.968195129, 3498.191097],
[0.968437196, 3501.952732],
[0.968678927, 3505.716078],
[0.968920321, 3509.481136],
[0.969161376, 3513.247904],
[0.969402092, 3517.016382],
[0.969642465, 3520.786569],
[0.969882496, 3524.558464],
[0.970122181, 3528.332066],
[0.97036152, 3532.107374],
[0.97060051, 3535.884387],
[0.970839149, 3539.663104],
[0.971077437, 3543.443525],
[0.97131537, 3547.225648],
[0.971552947, 3551.009472],
[0.971790166, 3554.794996],
[0.972027025, 3558.582219],
[0.972263521, 3562.371141],
[0.972499653, 3566.161759],
[0.972735418, 3569.954074],
[0.972970813, 3573.748083],
[0.973205837, 3577.543786],
[0.973440486, 3581.341181],
[0.973674759, 3585.140268],
[0.973908653, 3588.941044],
[0.974142164, 3592.74351],
[0.974375291, 3596.547663],
[0.974608031, 3600.353502],
[0.974840379, 3604.161026],
[0.975072335, 3607.970233],
[0.975303894, 3611.781123],
[0.975535053, 3615.593693],
[0.975765809, 3619.407943],
[0.975996159, 3623.22387],
[0.976226099, 3627.041474],
[0.976455626, 3630.860753],
[0.976684736, 3634.681705],
[0.976913426, 3638.504328],
[0.977141691, 3642.328622],
[0.977369528, 3646.154584],
[0.977596932, 3649.982212],
[0.9778239, 3653.811506],
[0.978050428, 3657.642463],
[0.97827651, 3661.475081],
[0.978502142, 3665.309359],
[0.97872732, 3669.145294],
[0.978952039, 3672.982885],
[0.979176293, 3676.82213],
[0.979400079, 3680.663026],
[0.979623391, 3684.505572],
[0.979846223, 3688.349766],
[0.98006857, 3692.195605],
[0.980290426, 3696.043088],
[0.980511786, 3699.892211],
[0.980732644, 3703.742973],
[0.980952993, 3707.595371],
[0.981172827, 3711.449403],
[0.98139214, 3715.305067],
[0.981610925, 3719.162359],
[0.981829176, 3723.021278],
[0.982046884, 3726.88182],
[0.982264044, 3730.743983],
[0.982480647, 3734.607765],
[0.982696686, 3738.473162],
[0.982912154, 3742.340172],
[0.983127041, 3746.208791],
[0.98334134, 3750.079016],
[0.983555043, 3753.950845],
[0.983768139, 3757.824274],
[0.983980621, 3761.6993],
[0.98419248, 3765.57592],
[0.984403705, 3769.45413],
[0.984614286, 3773.333926],
[0.984824215, 3777.215306],
[0.98503348, 3781.098265],
[0.98524207, 3784.9828],
[0.985449975, 3788.868907],
[0.985657184, 3792.756581],
[0.985863685, 3796.64582],
[0.986069466, 3800.536618],
[0.986274515, 3804.428971],
[0.986478819, 3808.322876],
[0.986682366, 3812.218327],
[0.986885142, 3816.11532],
[0.987087133, 3820.013851],
[0.987288326, 3823.913914],
[0.987488707, 3827.815505],
[0.98768826, 3831.718619],
[0.987886971, 3835.62325],
[0.988084824, 3839.529393],
[0.988281803, 3843.437042],
[0.988477892, 3847.346193],
[0.988673074, 3851.256839],
[0.988867331, 3855.168975],
[0.989060647, 3859.082594],
[0.989253002, 3862.997691],
[0.98944438, 3866.914258],
[0.989634759, 3870.83229],
[0.989824122, 3874.75178],
[0.990012448, 3878.672721],
[0.990199717, 3882.595107],
[0.990385908, 3886.518929],
[0.990570999, 3890.444181],
[0.990754969, 3894.370855],
[0.990937795, 3898.298943],
[0.991119454, 3902.228438],
[0.991299923, 3906.159331],
[0.991479179, 3910.091614],
[0.991657196, 3914.025279],
[0.99183395, 3917.960317],
[0.992009416, 3921.896719],
[0.992183567, 3925.834476],
[0.992356379, 3929.773578],
[0.992527822, 3933.714016],
[0.992697872, 3937.65578],
[0.992866499, 3941.598861],
[0.993033676, 3945.543246],
[0.993199374, 3949.488927],
[0.993363564, 3953.435893],
[0.993526217, 3957.384132],
[0.993687303, 3961.333632],
[0.993846792, 3965.284384],
[0.994004654, 3969.236374],
[0.994160858, 3973.189591],
[0.994315373, 3977.144022],
[0.994468169, 3981.099655],
[0.994619213, 3985.056477],
[0.994768474, 3989.014475],
[0.994915921, 3992.973635],
[0.995061523, 3996.933944],
[0.995205248, 4000.895387],
[0.995347064, 4004.857951],
[0.995486939, 4008.821622],
[0.995624844, 4012.786383],
[0.995760747, 4016.752221],
[0.995894617, 4020.719121],
[0.996026425, 4024.687065],
[0.99615614, 4028.656039],
[0.996283734, 4032.626027],
[0.996409177, 4036.597012],
[0.996532444, 4040.568978],
[0.996653505, 4044.541908],
[0.996772337, 4048.515784],
[0.996888913, 4052.490591],
[0.997003209, 4056.466309],
[0.997115204, 4060.442922],
[0.997224876, 4064.420411],
[0.997332204, 4068.398759],
[0.997437171, 4072.377947],
[0.99753976, 4076.357956],
[0.997639956, 4080.338769],
[0.997737744, 4084.320367],
[0.997833115, 4088.30273],
[0.997926058, 4092.28584],
[0.998016567, 4096.269678],
[0.998104636, 4100.254224],
[0.998190262, 4104.23946],
[0.998273446, 4108.225366],
[0.998354188, 4112.211924],
[0.998432494, 4116.199113],
[0.998508369, 4120.186916],
[0.998581825, 4124.175312],
[0.998652871, 4128.164283],
[0.998721524, 4132.153809],
[0.998787801, 4136.143873],
[0.998851721, 4140.134455],
[0.998913307, 4144.125537],
[0.998972584, 4148.117099],
[0.99902958, 4152.109126],
[0.999084325, 4156.101597],
[0.999136852, 4160.094496],
[0.999187196, 4164.087805],
[0.999235395, 4168.081506],
[0.999281488, 4172.075584],
[0.999325517, 4176.070021],
[0.999367526, 4180.064801],
[0.99940756, 4184.059909],
[0.999445668, 4188.055328],
[0.999481899, 4192.051045],
[0.999516302, 4196.047043],
[0.99954893, 4200.043308],
[0.999579836, 4204.039827],
[0.999609075, 4208.036587],
[0.999636699, 4212.033573],
[0.999662767, 4216.030774],
[0.999687332, 4220.028177],
[0.999710452, 4224.025771],
[0.999732183, 4228.023543],
[0.999752581, 4232.021484],
[0.999771702, 4236.019583],
[0.999789602, 4240.01783],
[0.999806336, 4244.016215],
[0.99982196, 4248.01473],
[0.999836525, 4252.013365],
[0.999850086, 4256.012112],
[0.999862694, 4260.010964],
[0.9998744, 4264.009913],
[0.999885253, 4268.008953],
[0.999895301, 4272.008076],
[0.999904591, 4276.007276],
[0.999913168, 4280.006548],
[0.999921075, 4284.005885],
[0.999928354, 4288.005283],
[0.999935045, 4292.004737],
[0.999941188, 4296.004243],
[0.999946819, 4300.003795],
[0.999951973, 4304.003391],
[0.999956684, 4308.003026],
[0.999960984, 4312.002697],
[0.999964903, 4316.0024],
[0.999968469, 4320.002134],
[0.999971711, 4324.001895],
[0.999974652, 4328.001681],
[0.999977318, 4332.001489],
[0.999979729, 4336.001317],
[0.999981909, 4340.001164],
[0.999983875, 4344.001027],
[0.999985647, 4348.000905],
[0.99998724, 4352.000797],
[0.999988672, 4356.000701],
[0.999989957, 4360.000615],
[0.999991107, 4364.00054],
[0.999992137, 4368.000473],
[0.999993056, 4372.000414],
[0.999993876, 4376.000361],
[0.999994606, 4380.000315],
[0.999995256, 4384.000275],
[0.999995833, 4388.000239],
[0.999996344, 4392.000208],
[0.999996797, 4396.000181],
[0.999997198, 4400.000157],
[0.999997552, 4404.000136],
[0.999997864, 4408.000117],
[0.999998138, 4412.000101],
[0.99999838, 4416.000087],
[0.999998592, 4420.000075],
[0.999998778, 4424.000065],
[0.999998941, 4428.000056],
[0.999999083, 4432.000048],
[0.999999207, 4436.000041],
[0.999999316, 4440.000035],
[0.99999941, 4444.00003],
[0.999999492, 4448.000026],
[0.999999563, 4452.000022],
[0.999999625, 4456.000019],
[0.999999678, 4460.000016],
[0.999999724, 4464.000013],
[0.999999764, 4468.000011],
[0.999999799, 4472.00001],
[0.999999828, 4476.000008],
[0.999999854, 4480.000007],
[0.999999876, 4484.000006],
[0.999999894, 4488.000005],
[0.99999991, 4492.000004],
[0.999999924, 4496.000004],
[0.999999936, 4500.000003],
[0.999999946, 4504.000002],
[0.999999954, 4508.000002],
[0.999999961, 4512.000002],
[0.999999967, 4516.000001],
[0.999999973, 4520.000001],
[0.999999977, 4524.000001],
[0.999999981, 4528.000001],
[0.999999984, 4532.000001],
[0.999999987, 4536.000001],
[0.999999989, 4540],
[0.999999991, 4544],
[0.999999992, 4548],
[0.999999993, 4552],
[0.999999995, 4556],
[0.999999996, 4560],
[0.999999996, 4564],
[0.999999997, 4568],
[0.999999997, 4572],
[0.999999998, 4576],
[0.999999998, 4580],
[0.999999999, 4584],
[0.999999999, 4588],
[0.999999999, 4592],
[0.999999999, 4596],
[0.999999999, 4600],
[0.999999999, 4604],
[1, 4608],
[1, 4612],
[1, 4616],
[1, 4620],
[1, 4624],
[1, 4628],
[1, 4632],
[1, 4636],
[1, 4640],
[1, 4644],
[1, 4648],
[1, 4652],
[1, 4656],
[1, 4660],
[1, 4664],
[1, 4668],
[1, 4672],
[1, 4676],
[1, 4680],
[1, 4684],
[1, 4688],
[1, 4692],
[1, 4696],
[1, 4700],
[1, 4704],
[1, 4708],
[1, 4712],
[1, 4716],
[1, 4720],
[1, 4724],
[1, 4728],
[1, 4732],
[1, 4736],
[1, 4740],
[1, 4744],
[1, 4748],
[1, 4752],
[1, 4756],
[1, 4760],
[1, 4764],
[1, 4768],
[1, 4772],
[1, 4776],
[1, 4780],
[1, 4784],
[1, 4788],
[1, 4792],
[1, 4796],
[1, 4800],
[1, 4804],
[1, 4808],
[1, 4812],
[1, 4816],
[1, 4820],
[1, 4824],
[1, 4828],
[1, 4832],
[1, 4836],
[1, 4840],
[1, 4844],
[1, 4848],
[1, 4852],
[1, 4856],
[1, 4860],
[1, 4864],
[1, 4868],
[1, 4872],
[1, 4876],
[1, 4880],
[1, 4884],
[1, 4888],
[1, 4892],
[1, 4896],
[1, 4900],
[1, 4904],
[1, 4908],
[1, 4912],
[1, 4916],
[1, 4920],
[1, 4924],
[1, 4928],
[1, 4932],
[1, 4936],
[1, 4940],
[1, 4944],
[1, 4948],
[1, 4952],
[1, 4956],
[1, 4960],
[1, 4964],
[1, 4968],
[1, 4972],
[1, 4976],
[1, 4980],
[1, 4984],
[1, 4988],
[1, 4992],
[1, 4996],
[1, 5000],
[1, 5004],
[1, 5008],
[1, 5012],
[1, 5016],
[1, 5020],
[1, 5024],
[1, 5028],
[1, 5032],
[1, 5036],
[1, 5040],
[1, 5044],
[1, 5048],
[1, 5052],
[1, 5056],
[1, 5060],
[1, 5064],
[1, 5068],
[1, 5072],
[1, 5076],
[1, 5080],
[1, 5084],
[1, 5088],
[1, 5092],
[1, 5096],
[1, 5100],
[1, 5104],
[1, 5108],
[1, 5112],
[1, 5116],
[1, 5120],
[1, 5124],
[1, 5128],
[1, 5132],
[1, 5136],
[1, 5140],
[1, 5144],
[1, 5148],
[1, 5152],
[1, 5156],
[1, 5160],
[1, 5164],
[1, 5168],
[1, 5172],
[1, 5176],
[1, 5180],
[1, 5184],
[1, 5188],
[1, 5192],
[1, 5196],
[1, 5200],
[1, 5204],
[1, 5208],
[1, 5212],
[1, 5216],
[1, 5220],
[1, 5224],
[1, 5228],
[1, 5232],
[1, 5236],
[1, 5240],
[1, 5244],
[1, 5248],
[1, 5252],
[1, 5256],
[1, 5260],
[1, 5264],
[1, 5268],
[1, 5272],
[1, 5276],
[1, 5280],
[1, 5284],
[1, 5288],
[1, 5292],
[1, 5296],
[1, 5300],
[1, 5304],
[1, 5308],
[1, 5312],
[1, 5316],
[1, 5320],
[1, 5324],
[1, 5328],
[1, 5332],
[1, 5336],
[1, 5340],
[1, 5344],
[1, 5348],
[1, 5352],
[1, 5356],
[1, 5360],
[1, 5364],
[1, 5368],
[1, 5372],
[1, 5376],
[1, 5380],
[1, 5384],
[1, 5388],
[1, 5392],
[1, 5396],
[1, 5400],
[1, 5404],
[1, 5408],
[1, 5412],
[1, 5416],
[1, 5420],
[1, 5424],
[1, 5428],
[1, 5432],
[1, 5436],
[1, 5440],
[1, 5444],
[1, 5448],
[1, 5452],
[1, 5456],
[1, 5460],
[1, 5464],
[1, 5468],
[1, 5472],
[1, 5476],
[1, 5480],
[1, 5484],
[1, 5488],
[1, 5492],
[1, 5496],
[1, 5500],
[1, 5504],
[1, 5508],
[1, 5512],
[1, 5516],
[1, 5520],
[1, 5524],
[1, 5528],
[1, 5532],
[1, 5536],
[1, 5540],
[1, 5544],
[1, 5548],
[1, 5552],
[1, 5556],
[1, 5560],
[1, 5564],
[1, 5568],
[1, 5572],
[1, 5576],
[1, 5580],
[1, 5584],
[1, 5588],
[1, 5592],
[1, 5596],
[1, 5600],
[1, 5604],
[1, 5608],
[1, 5612],
[1, 5616],
[1, 5620],
[1, 5624],
[1, 5628],
[1, 5632],
[1, 5636],
[1, 5640],
[1, 5644],
[1, 5648],
[1, 5652],
[1, 5656],
[1, 5660],
[1, 5664],
[1, 5668],
[1, 5672],
[1, 5676],
[1, 5680],
[1, 5684],
[1, 5688],
[1, 5692],
[1, 5696],
[1, 5700],
[1, 5704],
[1, 5708],
[1, 5712],
[1, 5716],
[1, 5720],
[1, 5724],
[1, 5728],
[1, 5732],
[1, 5736],
[1, 5740],
[1, 5744],
[1, 5748],
[1, 5752],
[1, 5756],
[1, 5760],
[1, 5764],
[1, 5768],
[1, 5772],
[1, 5776],
[1, 5780],
[1, 5784],
[1, 5788],
[1, 5792],
[1, 5796],
[1, 5800],
[1, 5804],
[1, 5808],
[1, 5812],
[1, 5816],
[1, 5820],
[1, 5824],
[1, 5828],
[1, 5832],
[1, 5836],
[1, 5840],
[1, 5844],
[1, 5848],
[1, 5852],
[1, 5856],
[1, 5860],
[1, 5864],
[1, 5868],
[1, 5872],
[1, 5876],
[1, 5880],
[1, 5884],
[1, 5888],
[1, 5892],
[1, 5896],
[1, 5900],
[1, 5904],
[1, 5908],
[1, 5912],
[1, 5916],
[1, 5920],
[1, 5924],
[1, 5928],
[1, 5932],
[1, 5936],
[1, 5940],
[1, 5944],
[1, 5948],
[1, 5952],
[1, 5956],
[1, 5960],
[1, 5964],
[1, 5968],
[1, 5972],
[1, 5976],
[1, 5980],
[1, 5984],
[1, 5988],
[1, 5992],
[1, 5996],
[1, 6000]]
return zip(*data)
def expected_rd01():
# these data were hand-picked from an old graph found on the internet
data = [
[0.303030303, 0],
[0.303030303, 2.2],
[0.636363636, 2.4],
[1, 2.8],
[1, 3],
[1, 6]]
return zip(*data)
def expected_rd03():
# these data were hand-picked from an old graph found on the internet
data = [
[0.454545455, 6],
[0.515151515, 5],
[0.548484848, 4],
[0.581818182, 3],
[0.606060606, 2.4],
[0.621212121, 2],
[0.727272727, 1],
[0.757575758, 0.85],
[0.909090909, 0.4],
[1, 0]]
return zip(*data)
def expected_rd02():
# these data were hand-picked from an old graph found on the internet
data = [
[0.681818182, 6],
[0.757575758, 5.45],
[0.793939394, 5],
[0.840909091, 4],
[0.866666667, 3],
[0.893939394, 2],
[0.909090909, 1.2],
[1, 0]]
return zip(*data)
def bw01_05():
f = open("../../tests/broadbridge_white/gold/bw01_swater_0051.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [10 - d[2] for d in data]
return (s, x)
def bw01_2():
f = open("../../tests/broadbridge_white/gold/bw01_swater_0171.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [10 - d[2] for d in data]
return (s, x)
def bw01_8():
f = open("../../tests/broadbridge_white/gold/bw01_swater_0311.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [10 - d[2] for d in data]
return (s, x)
def wli01_100():
f = open("../../tests/broadbridge_white/gold/wli01_swater_0100.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [- d[2] for d in data]
return (s, x)
def wli01_500():
f = open("../../tests/broadbridge_white/gold/wli01_swater_0500.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [- d[2] for d in data]
return (s, x)
def wli01_1000():
f = open("../../tests/broadbridge_white/gold/wli01_swater_1000.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [- d[2] for d in data]
return (s, x)
def rd01():
f = open("../../tests/broadbridge_white/gold/rd01_swater_0161.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [d[2] for d in data]
return (s, x)
def rd02():
f = open("../../tests/broadbridge_white/gold/rd02_swater_0114.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [d[2] for d in data]
return (s, x)
def rd03():
f = open("../../tests/broadbridge_white/gold/rd03_swater_0015.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [d[2] for d in data]
return (s, x)
plt.figure()
plt.plot(expected_rd01()[0], expected_rd01()[1], 'bs', markersize = 6.0, label = 'HYDRUS (approx)')
plt.plot(rd01()[0], rd01()[1], 'b-', linewidth = 2.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Saturation")
plt.ylabel("Height (m) (infiltration takes place at h=6)")
plt.title("Saturation profile in the caisson after 4.16 days of infiltration")
plt.axis([0.2, 1.1, 0, 6])
plt.savefig("rd01.pdf")
plt.figure()
plt.plot(expected_rd02()[0], expected_rd02()[1], 'bs', markersize = 6.0, label = 'HYDRUS (approx) 4 days')
plt.plot(expected_rd03()[0], expected_rd03()[1], 'rs', markersize = 6.0, label = 'HYDRUS (approx) 100 days')
plt.plot(rd02()[0], rd02()[1], 'b-', linewidth = 2.0, label = 'MOOSE 4 days')
plt.plot(rd03()[0], rd03()[1], 'r-', linewidth = 2.0, label = 'MOOSE 100 days')
plt.legend(loc = 'lower left')
plt.xlabel("Saturation")
plt.ylabel("Height (m) (drainage takes place from h=0)")
plt.title("Saturation profile after drainage from an initially-saturated caisson")
plt.savefig("rd02.pdf")
sys.exit(0)
plt.figure()
plt.plot(expected_wli01_100()[0], expected_wli01_100()[1], 'b-', linewidth = 2.0, label = 'Analytic (t=100)')
plt.plot(wli01_100()[0], wli01_100()[1], 'bs', markersize = 6.0, label = 'MOOSE (t=100)')
plt.plot(expected_wli01_500()[0], expected_wli01_500()[1], 'k-', linewidth = 2.0, label = 'Analytic (t=500)')
plt.plot(wli01_500()[0], wli01_500()[1], 'ks', markersize = 6.0, label = 'MOOSE (t=500)')
plt.plot(expected_wli01_1000()[0], expected_wli01_1000()[1], 'r-', linewidth = 2.0, label = 'Analytic (t=1000)')
plt.plot(wli01_1000()[0], wli01_1000()[1], 'rs', markersize = 6.0, label = 'MOOSE (t=1000)')
plt.legend(loc = 'lower left')
plt.xlabel("Saturation")
plt.ylabel("Depth (m)")
plt.title("Saturation profiles as a function of depth for drainage from base")
plt.axis([0, 1.1, 0, 5000])
plt.gca().invert_yaxis()
plt.savefig("wli.pdf")
sys.exit(0)
plt.figure()
plt.plot(expected_bw01_05()[0], expected_bw01_05()[1], 'b-', linewidth = 2.0, label = 'Analytic (t=0.5)')
plt.plot(bw01_05()[0], bw01_05()[1], 'bs', markersize = 6.0, label = 'MOOSE (t=0.5)')
plt.plot(expected_bw01_2()[0], expected_bw01_2()[1], 'k-', linewidth = 2.0, label = 'Analytic (t=2)')
plt.plot(bw01_2()[0], bw01_2()[1], 'ks', markersize = 6.0, label = 'MOOSE (t=2)')
plt.plot(expected_bw01_8()[0], expected_bw01_8()[1], 'r-', linewidth = 2.0, label = 'Analytic (t=8)')
plt.plot(bw01_8()[0], bw01_8()[1], 'rs', markersize = 6.0, label = 'MOOSE (t=8)')
plt.legend(loc = 'lower right')
plt.xlabel("Saturation")
plt.ylabel("Depth (m)")
plt.title("Saturation profiles as a function of depth for constant infiltration")
plt.axis([0, 0.9, 0, 10])
plt.gca().invert_yaxis()
plt.savefig("bw.pdf")
sys.exit(0)
|
lgpl-2.1
|
fengzhyuan/scikit-learn
|
sklearn/datasets/twenty_newsgroups.py
|
126
|
13591
|
"""Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and test
sets. The compressed archive is around 14 MB; once uncompressed, the train
set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and store it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
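# Illustrative sketch of the three strip_* helpers on a hypothetical post
# (the example text below is made up for this comment, not part of the dataset):
#     post = ("From: someone@example.com\nSubject: hello\n\n"
#             "somebody writes:\n> quoted line\nactual reply\n--\nsignature")
#     strip_newsgroup_header(post)   # drops everything before the first blank line
#     strip_newsgroup_quoting(post)  # drops the "writes:" line and the "> ..." line
#     strip_newsgroup_footer(post)   # drops the "--"-delimited signature block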
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
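# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): how the two loaders above are typically called. Running this
# downloads and caches the dataset on first use; the category names are
# standard 20-newsgroups labels and can be adjusted.
if __name__ == "__main__":
    bunch = fetch_20newsgroups(subset='train',
                               categories=['sci.space', 'rec.autos'],
                               remove=('headers', 'footers', 'quotes'))
    print("%d documents across %d categories"
          % (len(bunch.data), len(bunch.target_names)))
    # Pre-vectorized variant: returns a Bunch with a sparse feature matrix.
    vectorized = fetch_20newsgroups_vectorized(subset='test')
    print("vectorized shape: %s x %s" % vectorized.data.shape)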
|
bsd-3-clause
|
bricegnichols/urbansim
|
urbansim/tests/test_accounts.py
|
5
|
2349
|
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import accounts
@pytest.fixture(scope='module')
def acc_name():
return 'test'
@pytest.fixture(scope='module')
def acc_bal():
return 1000
@pytest.fixture
def acc(acc_name, acc_bal):
return accounts.Account(acc_name, acc_bal)
def test_init(acc, acc_name):
assert acc.name == acc_name
assert acc.balance == 1000
assert acc.transactions == []
def test_add_transaction(acc, acc_bal):
amount = -50
subaccount = ('a', 'b', 'c')
metadata = {'for': 'light speed engine'}
acc.add_transaction(amount, subaccount, metadata)
assert len(acc.transactions) == 1
assert acc.balance == acc_bal + amount
t = acc.transactions[-1]
assert isinstance(t, accounts.Transaction)
assert t.amount == amount
assert t.subaccount == subaccount
assert t.metadata == metadata
def test_add_transactions(acc, acc_bal):
t1 = accounts.Transaction(200, ('a', 'b', 'c'), None)
t2 = (-50, None, {'to': 'Acme Corp.'})
t3 = (-100, ('a', 'b', 'c'), 'Acme Corp.')
t4 = (42, None, None)
acc.add_transactions((t1, t2, t3, t4))
assert len(acc.transactions) == 4
assert acc.balance == acc_bal + t1[0] + t2[0] + t3[0] + t4[0]
assert acc.total_transactions() == t1[0] + t2[0] + t3[0] + t4[0]
assert acc.total_transactions_by_subacct(('a', 'b', 'c')) == t1[0] + t3[0]
assert acc.total_transactions_by_subacct(None) == t2[0] + t4[0]
assert list(acc.all_subaccounts()) == [('a', 'b', 'c'), None]
assert list(acc.iter_subaccounts()) == [
(('a', 'b', 'c'), t1[0] + t3[0]),
(None, t2[0] + t4[0])]
def test_column_names_from_metadata():
cnfm = accounts._column_names_from_metadata
assert cnfm([]) == []
assert cnfm([{'a': 1, 'b': 2}]) == ['a', 'b']
assert cnfm([{'a': 1}, {'b': 2}]) == ['a', 'b']
assert cnfm([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) == ['a', 'b']
def test_to_frame(acc, acc_bal):
t1 = accounts.Transaction(200, ('a', 'b', 'c'), None)
t2 = (-50, None, {'to': 'Acme Corp.'})
acc.add_transactions((t1, t2))
expected = pd.DataFrame(
[[200, ('a', 'b', 'c'), None],
[-50, None, 'Acme Corp.']],
columns=['amount', 'subaccount', 'to'])
df = acc.to_frame()
pdt.assert_frame_equal(df, expected)
|
bsd-3-clause
|
IshankGulati/scikit-learn
|
examples/linear_model/plot_sgd_penalties.py
|
124
|
1877
|
"""
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
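# Note added for clarity: l1 and l2 above return the non-negative w1 branch of
# the unit contours |w0| + |w1| = 1 and w0**2 + w1**2 = 1. el(xs, z) below
# plays the same role for the elastic-net penalty with mixing parameter z;
# its closed form solves for w1 on that contour, and its denominator
# (2 - 4 * z) vanishes at z = 0.5, which is why alpha is set slightly above
# 0.5 further down.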
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would make the elastic-net denominator (2 - 4 * alpha) zero
cross(1.2)
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
lw = 2
plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw)
plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw)
plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw)
plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
|
bsd-3-clause
|
bnaul/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
17
|
5201
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphicalLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data are not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphicalLasso, which sets the sparsity of the
model, is chosen by internal cross-validation in GraphicalLassoCV. As can be
seen in figure 2, the grid used to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphicalLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
# #############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
# #############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphicalLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
# #############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphicalLassoCV', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphicalLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
if hasattr(ax, 'set_facecolor'):
ax.set_facecolor('.7')
else:
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_results_["alphas"], model.cv_results_["mean_score"], 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
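# #############################################################################
# Added sketch (not part of the original example): quantify the docstring's
# claim that the l1 estimator detects too many non-zero coefficients.
# Thresholding at 1e-10 to decide "non-zero" is an assumption made only for
# this illustration.
true_support = np.abs(prec) > 1e-10
estimated_support = np.abs(prec_) > 1e-10
n_true = np.sum(true_support) - n_features  # off-diagonal non-zeros, ground truth
n_detected = np.sum(estimated_support) - n_features  # off-diagonal non-zeros, estimated
print("off-diagonal non-zeros: %d true vs %d detected" % (n_true, n_detected))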
|
bsd-3-clause
|
weidel-p/nest-simulator
|
pynest/examples/repeated_stimulation.py
|
2
|
4210
|
# -*- coding: utf-8 -*-
#
# repeated_stimulation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Repeated Stimulation
--------------------
Simple example for how to repeat a stimulation protocol
using the ``origin`` property of devices.
In this example, a ``poisson_generator`` generates a spike train that is
recorded directly by a ``spike_detector``, using the following paradigm:
1. A single trial lasts for 1000 ms.
2. Within each trial, the ``poisson_generator`` is active from 100 ms to 500 ms.
We achieve this by setting the ``start`` and ``stop`` properties of the
generator to 100 ms and 500 ms, respectively, and setting the ``origin`` to the
simulation time at the beginning of each trial. Start and stop are interpreted
relative to the ``origin``.
"""
###############################################################################
# First, the modules needed for simulation and analysis are imported.
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
###############################################################################
# Second, we set the parameters so the ``poisson_generator`` generates 1000
# spikes per second and is active from 100 to 500 ms
rate = 1000.0 # generator rate in spikes/s
start = 100.0 # start of simulation relative to trial start, in ms
stop = 500.0 # end of simulation relative to trial start, in ms
###############################################################################
# Each trial is simulated for 1 s (1000 ms), and the protocol is repeated 5 times
trial_duration = 1000.0 # trial duration, in ms
num_trials = 5 # number of trials to perform
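###############################################################################
# Added back-of-the-envelope check (not in the original script): the generator
# is active for stop - start = 400 ms per trial, so at 1000 spikes/s each trial
# yields about 1000 * 0.4 = 400 spikes on average, i.e. roughly 2000 recorded
# spikes over the 5 trials (Poisson-distributed, so only approximately).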
###############################################################################
# Third, the network is set up. We reset the kernel and create a
# ``poisson_generator``, in which the handle is stored in `pg`.
#
# The parameters for rate and start and stop of activity are given as optional
# parameters in the form of a dictionary.
nest.ResetKernel()
pg = nest.Create('poisson_generator',
params={'rate': rate,
'start': start,
'stop': stop}
)
###############################################################################
# The ``spike_detector`` is created and the handle stored in `sd`.
sd = nest.Create('spike_detector')
###############################################################################
# The ``Connect`` function connects the nodes so spikes from pg are collected by
# the ``spike_detector`` `sd`
nest.Connect(pg, sd)
###############################################################################
# Before each trial, we set the ``origin`` of the ``poisson_generator`` to the
# current simulation time. This automatically sets the start and stop time of
# the ``poisson_generator`` to the specified times with respect to the origin.
# The simulation is then carried out for the specified time in trial_duration.
for n in range(num_trials):
pg.origin = nest.GetKernelStatus('time')
nest.Simulate(trial_duration)
###############################################################################
# Now we plot the result, including a histogram using the ``nest.raster_plot``
# function. Note: The histogram will show spikes seemingly located before
# 100 ms into each trial. This is due to sub-optimal automatic placement of
# histogram bin borders.
nest.raster_plot.from_device(sd, hist=True, hist_binwidth=100.,
title='Repeated stimulation by Poisson generator')
plt.show()
|
gpl-2.0
|
shikhar413/openmc
|
tests/regression_tests/mgxs_library_distribcell/test.py
|
7
|
2724
|
import hashlib
import sys
import openmc
import openmc.mgxs
from openmc.examples import pwr_assembly
import pytest
from tests.testing_harness import PyAPITestHarness
class MGXSTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
# Generate inputs using parent class routine
super().__init__(*args, **kwargs)
# Initialize a one-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 20.e6])
# Initialize MGXS Library for a few cross section types
# for one material-filled cell in the geometry
self.mgxs_lib = openmc.mgxs.Library(self._model.geometry)
self.mgxs_lib.by_nuclide = False
# Test all relevant MGXS types
relevant_MGXS_TYPES = [item for item in openmc.mgxs.MGXS_TYPES
if item != 'current']
self.mgxs_lib.mgxs_types = tuple(relevant_MGXS_TYPES) + \
openmc.mgxs.MDGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.num_delayed_groups = 6
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'distribcell'
cells = self.mgxs_lib.geometry.get_all_material_cells().values()
self.mgxs_lib.domains = [c for c in cells if c.name == 'fuel']
self.mgxs_lib.build_library()
# Add tallies
self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)
self._model.tallies.export_to_xml()
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Average the MGXS across distribcell subdomains
avg_lib = self.mgxs_lib.get_subdomain_avg_library()
# Build a string from Pandas Dataframe for each 1-group MGXS
outstr = ''
for domain in avg_lib.domains:
for mgxs_type in avg_lib.mgxs_types:
mgxs = avg_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
@pytest.mark.xfail(sys.version_info < (3, 6),
reason="Pandas 1.0 API changed and requires Python 3.6+")
def test_mgxs_library_distribcell():
model = pwr_assembly()
harness = MGXSTestHarness('statepoint.10.h5', model)
harness.main()
|
mit
|
rronen/HAF-score
|
hfs_utils.py
|
1
|
10215
|
''' utility for computing the haplotype frequency spectrum (HFS) and meta-HFS '''
import sys
from collections import defaultdict
import numpy as np
from sklearn import preprocessing
import scipy.cluster.hierarchy as hier
''' internal imports '''
import cfp_score as cfp
import params as p
###############################################################################
def get_hfs( hap_mat, col_freqs, type, clf=None ):
''' Compute the meta-haplotype frequency spectrum using specified method '''
if(type == 'exact'):
return exact_hfs( hap_mat )
elif( type == 'flt-f' ):
return mhfs_flt_f( hap_mat, col_freqs )
elif( type == 'flt-f-cfp' ):
if( clf is None ):
return mhfs_flt_f_cfp( hap_mat, col_freqs )
else:
return mhfs_flt_f_cfp_clf( hap_mat, col_freqs, clf )
elif( type == 'clust-hier' ):
return mhfs_clust_hier( hap_mat, col_freqs )
else:
print("[get_hfs]: error, unrecognized meta-haplotype method: %s" % type)
sys.exit(1)
###############################################################################
def exact_hfs( hap_mat ):
''' Computes the exact HFS, given haplotype matrix.
Returns an np.array histogram of haplotype frequencies.
'''
# haplotype counts
counts = defaultdict( int )
for h in hap_mat: counts[ h.tostring() ] += 1
# haplotype frequency spectrum
return counts_to_hist( counts )
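# Worked toy example (comment added for clarity, not part of the original
# module): for hap_mat = [[0, 1], [0, 1], [1, 1]] the counts are
# {bytes of row (0, 1): 2, bytes of row (1, 1): 1}, i.e. one haplotype seen
# twice and one seen once; counts_to_hist (defined elsewhere in this module)
# turns those counts into the returned spectrum.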
###############################################################################
def mhfs_flt_f( hap_mat, col_freqs, bacol=None ):
''' Computes meta-haplotype frequency spectrum, given haplotype matrix.
Clusters by filtering low frequency alleles.
Returns histogram (np.array) of meta-haplotype frequencies.
'''
# filter low frequency alleles
flt_hap_mat = hap_mat[:, col_freqs > p.flt_freq ]
# meta-haplotype counts
counts = defaultdict(int)
for h in flt_hap_mat: counts[ h.tostring() ] += 1
# meta-haplotype frequency spectrum
return counts_to_hist( counts )
###############################################################################
def mhfs_flt_f_cfp( hap_mat, col_freqs ):
''' Computes meta-haplotype frequency spectrum, given haplotype matrix.
Clusters by filtering low-frequency mutations enriched in individuals with high Clade Fitness Proxy (CFP) scores.
Returns histogram (np.array) of meta-haplotype frequencies.
'''
keep = np.ones( len(col_freqs) ) # 1=keep, initially keep all
# mutation CFP scores
mutation_cfp_scores = cfp.mutation_CFP_scores( hap_mat, col_freqs )
# if unfolded, determine regime
if( not p.fold_freq ):
# mean CFP of non fixed mutations
if( np.mean( mutation_cfp_scores[ col_freqs < 1.0 ] ) > 3.5 ):
high = True # high mean, pre fixation
else:
high = False # low mean, post fixation
# mark columns (mutations) for removal
for i in range( len(col_freqs) ):
f_i, cfp_i = col_freqs[i], mutation_cfp_scores[i]
# filter fixed, uninformative for clustering
if( f_i == 0.0 ): keep[i] = 0 # only b-allele (kept even if 0-column, has cfp_i=np.nan)
# if( f_i == 1.0 ): keep[i] = 0 # only unfolded (fixed alleles kept)
# apply filtering rule
if( p.fold_freq ):
# just frequency
# if(f_i < 0.25): keep[i] = 0
# linear constraint
if( cfp_i < -5.8333*f_i+1.75 ): keep[i] = 0 # (0.30,0) m=-5.833=-1.75/0.3
# if( cfp_i < -4*f_i+2.2 ): keep[i] = 0
# if( cfp_i < -10.0*f_i + 2.00 ): keep[i] = 0 # (0.20,0) m=-10
# if( cfp_i < -7.00*f_i + 1.75 ): keep[i] = 0 # (0.25,0) m=-7
# bottom left rectangle
# if( cfp_i < 1.75 and f_i < 0.25 ): keep[i] = 0
else:
# just frequency
# if(f_i < 0.35): keep[i] = 0
# linear constraint
# if( cfp_i < -5.1*f_i+4 ): keep[i] = 0 # (0.0,4.0),(1.275,0.0)
# both rectangles
# if( cfp_i > p.flt_cfp_h and f_i < p.flt_freq_h ):
# keep[i] = 0
# elif( cfp_i < p.flt_cfp_l and f_i < p.flt_freq_l ):
# keep[i] = 0
# conditional rectangles
if(high):
if( cfp_i > 3.75 and f_i < 0.2 ): keep[i] = 0
else:
if( cfp_i < -5.1*f_i+4.0 ): keep[i] = 0 # (0.0,4.0),(1.275,0.0)
# filtered meta-haplotype matrix
flt_hap_mat = hap_mat[:, keep > 0 ]
# cluster remaining columns -- REMOVE?
# col_freqs = col_freqs[ keep > 0 ]
# return mhfs_clust_hier( flt_hap_mat, col_freqs )
# meta-haplotype counts
counts = defaultdict(int)
for h in flt_hap_mat: counts[ h.tostring() ] += 1
# meta-haplotype frequency spectrum
return counts_to_hist( counts )
###############################################################################
def mhfs_flt_f_cfp_clf( hap_mat, col_freqs, clf ):
''' Computes meta-haplotype frequency spectrum, given haplotype matrix and a 2D classifier of freq/CFP.
Clusters by filtering mutations deemed sweep class by the classifier.
Returns histogram (np.array) of meta-haplotype frequencies.
'''
    # remove columns with freq=0 (cannot be classified, as they have CFP=np.nan)
hap_mat = hap_mat[:, col_freqs > 0 ]
col_freqs = col_freqs[ col_freqs > 0 ]
# mutation CFP scores
mutation_cfp_scores = cfp.mutation_CFP_scores( hap_mat, col_freqs )
# data points for classification
A = np.c_[ col_freqs, mutation_cfp_scores ]
# predict
# A = preprocessing.normalize( A )
# A = preprocessing.scale( A )
pred = clf.predict( A ) # 0 neutral, 1 sweep
# pred = clf.predict_proba( A )
# print "%i mutations" % len( pred )
# print "%i sweep1, %i neutr0" % ( len( pred[ pred == 1 ] ), len( pred[ pred == 0 ] ) )
# filtered meta-haplotype matrix using predicted labels
flt_hap_mat = hap_mat[:, pred == 0 ]
# flt_hap_mat = hap_mat[:, pred[:,1] > 0.45 ]
# cluster reduced matrix
col_freqs = col_freqs[ pred == 0 ]
return mhfs_clust_hier( flt_hap_mat, col_freqs )
# meta-haplotype counts
counts = defaultdict(int)
for h in flt_hap_mat: counts[ h.tostring() ] += 1
# print "mH counts: ", sorted( counts.values(), reverse=True )
# raw_input("Press Enter to continue...")
# print
# meta-haplotype frequency spectrum
return counts_to_hist( counts )
###############################################################################
def mhfs_clust_hier( hap_mat, col_freqs ):
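    ''' Computes meta-haplotype frequency spectrum by hierarchical clustering.
        Rows (haplotypes) are clustered with average linkage on Hamming distance,
        and flat clusters are formed with scipy's fcluster at t=1.0 (default
        'inconsistent' criterion); cluster sizes give the spectrum.
        Returns histogram (np.array) of meta-haplotype frequencies.
    '''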
# filter low frequency alleles
# hap_mat = hap_mat[:, (col_freqs > 0.3)]
# Z = hier.linkage( hap_mat, method='ward', metric='euclidean' )
Z = hier.linkage( hap_mat, method='average', metric='hamming' )
clusters = hier.fcluster( Z, 1.0 )
counts = defaultdict(int)
for m in clusters: counts[m] += 1
# meta-haplotype frequency spectrum
return counts_to_hist( counts )
###############################################################################
def counts_to_hist( counts_dict ):
''' Computes a scaled histogram from the values of given dict.
Returns hist (np.array) scaled by bin centers.
'''
# convert counts to frequencies
if(p.scale_counts):
# re-scale counts to [0,1]
freqs = np.array( counts_dict.values() ) / float( p.sample_size )
else:
# raw counts
freqs = counts_dict.values()
# make bins for histogram, if not made already
if(p.bins is None and not p.scale_counts): p.bins = np.arange(0, p.sample_size+1, 1)
if(p.bins is None and p.scale_counts): p.bins = make_bins()
# histogram
hist, p.bin_edges = np.histogram(freqs , p.bins)
hist = hist.astype( np.float64 )
# histogram scaling
if( p.scale_counts ):
        # scaled bin centers
centers = 0.5 * ( p.bin_edges[:-1] + p.bin_edges[1:] )
hist *= centers**2 # >1 to scale by polynomial of bin-centers
else:
        # actual discrete bins
hist *= np.arange(0, p.sample_size, 1)
return hist
###############################################################################
def make_bins():
''' Makes bins for haplotype frequency spectra.
Returns np.array of bin boundaries, in [0,1] or [0, sample-size].
'''
bin_dist = 'const' # 'low-high' 'exp-inc'
if( p.scale_counts ):
if( bin_dist == 'const' ):
# constant bin sizes
r = np.arange( 0, 1.00001, 1.0/p.nbins )
elif(bin_dist == 'low-high'):
# low/high bin sizes
r1 = np.arange(0.0, 0.201, 0.04) # low frequencies, high resolution
r2 = np.arange(0.3, 1.001, 0.20) # high frequencies, low resolution
r = np.concatenate( (r1, r2) )
elif( bin_dist == 'exp-inc'):
# exponentially growing bin sizes
r, f = [0.0], 0.02
while f < 1.0:
r.append(f)
f *= 1.75
r.append(1.0)
r = np.array(r)
else:
# counts, no binning
        r = np.arange( 0, p.sample_size+1, 1 )
    # write bins to screen
    print "\n" + "HFS bins:", r, "\n"
return r
###############################################################################
################################# DEPRECATED ##################################
###############################################################################
def hist_hapgroup_unj_freqs(clust_file):
''' Computes haplotype-cluster frequency spectrum, given a file with clustering results.
Clustering done via max-diameter subtrees in the unrooted neighbor joining tree.
        Returns histogram (np.array) of haplogroup frequencies.
'''
counts = {}
f = open(clust_file)
for i,line in enumerate(f):
if('Number of labels' in line and i > 1): # skips 1st (tree total) 'Number of labels'
counts[i] = int(line.split()[4])
return counts_to_hist(counts)
|
gpl-2.0
|
TorHou/merge_pcr_duplicates
|
cross_convolution_analysis.py
|
1
|
5711
|
from matplotlib.backends.backend_pdf import PdfPages
import pysam
import os
import pandas
import numpy
import argparse
import logging
import matplotlib.pyplot as plt
####################
## ARGS INPUT ##
####################
tool_description = """
The tool calculates the cross convolution between the forward and reverse strand.
For a typical ChIPseq experiment, the cross-convolution should have a distinct peak
for a shift equal to the fragment size. Another peak for the read-length might also
be observable.
By default output is written to source file location.
Example usage:
cross_convolution_analysis.py reads.bam genome_table.tsv shift -o output.pdf
"""
# parse command line arguments
parser = argparse.ArgumentParser(description=tool_description,
formatter_class=argparse.RawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
"reads",
help="Path to read file in bam format. Remark: the file needs to be sorted.")
parser.add_argument(
'genome_table',
help="Path to the table of the reference genome (e.g. hg18) "
"listing the chromosomes and length (e.g. chr1 \t 247249719).")
parser.add_argument(
'shift',
type=int,
help="Size of the shift. You will shift the reverse strand over the forward strand" +
"in oder to determine the highest common read counts per position.")
# optional arguments
parser.add_argument(
"-o", "--outfile",
help="Write results to this file.")
parser.add_argument(
"-d", "--debug",
help="Print lots of debugging information",
action="store_true")
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(filename)s - %(levelname)s - %(message)s")
else:
logging.basicConfig(format="%(filename)s - %(levelname)s - %(message)s")
logging.info("Parsed arguments:")
logging.info(" reads: '{}'".format(args.reads))
logging.info(" genome_table: '{}'".format(args.genome_table))
logging.info(" shift: '{}'".format(args.shift))
if args.outfile:
logging.info(" outfile: enabled writing to file")
logging.info(" outfile: '{}'".format(args.outfile))
logging.info("")
if args.shift <= 0:
raise Exception("[ERROR] Shift must be a positive integer bigger than 0.")
###############################
## READ AND PROCESS DATA ##
###############################
print('[START]')
# read in the two files provided by the user
genome_table = pandas.read_table(args.genome_table, sep='\t', names=['chrom', 'length']).set_index('chrom').T.to_dict(orient='list')
pure_bam = pysam.AlignmentFile(args.reads)
# position-wise vector for the forward and reverse strand
chr_vec_forward = numpy.zeros(1)
chr_vec_reverse = numpy.zeros(1)
# vector holding the positions that the read covers
pos_vec = []
buff_chrom = ''
error_check_chr_list = []
# array holding the overall result later used for the plot
conv_array = numpy.zeros(args.shift + 1)
# go over all reads
for read in pure_bam:
# get the name of the chromosome
chrom = pure_bam.get_reference_name(read.rname)
if buff_chrom != chrom:
if chrom in error_check_chr_list:
raise Exception("[ERROR] bam file not sorted. Please sort the bam file.")
if chrom not in genome_table:
raise Exception("[ERROR] Chromosome could not be found in the genome table." +
"Please check your reference genome.")
        # once we have looked at all reads for a chromosome, calculate the convolution
# between forward and reverse strand
if buff_chrom != '':
print('[NOTE] finish')
print('[NOTE] Convolve for ' + buff_chrom)
conv_array += numpy.convolve(chr_vec_forward[::-1], chr_vec_reverse, mode='valid')
print('[NOTE] finish')
# init the two position-wise vectors (add additionally position for the forward strand
# with the size of the shift in order to calculate the convolution only for the shifted positions)
chr_vec_forward = numpy.zeros(genome_table[chrom][0] + args.shift)
chr_vec_reverse = numpy.zeros(genome_table[chrom][0])
buff_chrom = chrom
        error_check_chr_list.append(chrom)
print('[NOTE] Start reading for ' + chrom)
# if the sam format bit-flag = 0 then this is the forward strand
if read.flag == 0:
# just in case, check if the read is too long, i.e. going over the chromosome
# get all position that the read covers
# (for the forward strand add the shift length which is necessary for the convolution)
if read.pos+len(read.seq) > genome_table[chrom][0]:
pos_vec = [int(i+args.shift) for i in range(read.pos, genome_table[chrom][0])]
else:
pos_vec = [int(i+args.shift) for i in range(read.pos, read.pos+len(read.seq))]
# increment each position on the forward strand that the read covers
chr_vec_forward[pos_vec] += 1
# if the sam-format bit-flag = 16 then this is the reverse strand
elif read.flag == 16:
if read.pos + len(read.seq) > genome_table[chrom][0]:
pos_vec = [i for i in range(read.pos, genome_table[chrom][0])]
else:
pos_vec = [i for i in range(read.pos, read.pos+len(read.seq))]
        chr_vec_reverse[pos_vec] += 1 # reverse
# convolve the final chromosome (inside the loop, a convolution is only triggered when a new chromosome starts)
if buff_chrom != '':
    print('[NOTE] Convolve for ' + buff_chrom)
    conv_array += numpy.convolve(chr_vec_forward[::-1], chr_vec_reverse, mode='valid')
##############
## PLOT ##
##############
print('[NOTE] Create plot')
if args.outfile:
    plotfile = args.outfile
else:
    plotfile = os.path.dirname(os.path.abspath(__file__)) + '/' + 'Cross-Convolution.pdf'
pp = PdfPages(plotfile)
plt.plot(conv_array, ls='--', marker='.', ms=10.0)
plt.xlabel('Shift Distance')
plt.ylabel('Convoluted Read Counts')
pp.savefig()
pp.close()
print('[FINISH]')
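# Illustrative sketch (never called by the script): the cross-convolution above
# boils down to numpy.convolve of the reversed forward-strand coverage against
# the reverse-strand coverage, giving one value per tested shift. The toy
# vectors and positions below are hypothetical.
def _toy_cross_convolution_demo(shift=5):
    fwd = numpy.zeros(20 + shift)   # forward coverage, padded by 'shift' as in the script
    rev = numpy.zeros(20)           # reverse coverage
    fwd[shift + 3:shift + 8] += 1   # a forward read covering positions 3..7 (stored at pos+shift)
    rev[6:11] += 1                  # a reverse read covering positions 6..10
    # valid-mode convolution of the reversed forward vector against the reverse
    # vector yields one value per tested shift (shift+1 values in total)
    return numpy.convolve(fwd[::-1], rev, mode='valid')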
|
mit
|
kangwonlee/ECA
|
lab_01_intro/05_fill_demo.py
|
1
|
1221
|
# -*- coding: utf8 -*-
"""
Simple demo of the fill function.
"""
# "lines_bars_and_markers example code: fill_demo.py," Matplotlib 1.5.0 documentation. [Online]. Available:
# http://matplotlib.org/1.5.0/examples/lines_bars_and_markers/fill_demo.html. [Accessed: 21-Aug-2016].
# display each string on the screen
# import the numpy module, which provides array and matrix functionality
# its functions are accessed through the np. prefix
import numpy as np
# import the matplotlib.pyplot module, which provides plotting functions, under the name plt
# its functions are accessed through the plt. prefix
import matplotlib.pyplot as plt
# begin preparing the data
# generate x data: divide the interval from 0 to 1 into 500 points
x = np.linspace(0, 1, 500)
# generate y data: the product of the exp() and sin() functions
# produces the same number of data points as x
# >>> print len(x)
# >>> print len(y)
# the two results above should be equal
y = np.sin(10 * 2 * np.pi * x) * np.exp(-5 * x)
# data preparation done
# begin preparing the plot
# draw the curve and fill the area between it and the x axis in red ('r')
plt.fill(x, y, 'r')
# draw a grid
plt.grid(True)
# plot preparation done
# show the plot on screen
plt.show()
|
apache-2.0
|
allenai/deep_qa
|
scripts/clean_newsqa.py
|
4
|
4932
|
# -*- coding: utf-8 -*-
"""
This script takes as input CSV files from the Maluuba NewsQA dataset.
The dataset is quite dirty by default, so this script does some preprocessing
and extracts the relevant information we neeed in the deep_qa library.
"""
import json
import logging
import os
import re
from argparse import ArgumentParser
import pandas
from tqdm import tqdm
from scipy.stats import mode
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main():
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
parser = ArgumentParser(description=("Clean up a CSV file in "
"the NewsQA dataset."))
parser.add_argument('input_csv', nargs='+',
metavar="<input_csv>", type=str,
help=("Path to CSV files to clean up. Pass in "
"as many as you want, and the output "
"will be written to <input_csv>.clean"))
arguments = parser.parse_args()
for newsqa_file in arguments.input_csv:
clean_newsqa_csv(newsqa_file)
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def clean_newsqa_csv(newsqa_file_path):
logger.info("cleaning up %s", newsqa_file_path)
# open the file as a csv
dataframe = pandas.read_csv(newsqa_file_path, encoding='utf-8')
dirty_rows = dataframe.values.tolist() # pylint: disable=no-member
clean_rows = []
clean_headers = ["question_text", "label", "answer_string", "passage"]
for row in tqdm(dirty_rows):
clean_row = []
# clean up dirty file
candidate_answers = re.split(r"\||,", row[2])
answer_absent_prob = float(row[3]) if isfloat(row[3]) else 1.0
passage_bad_prob = float(row[4]) if isfloat(row[4]) else 1.0
validated_answers = row[5]
raw_passage_text = row[6]
# figure out the label span (answer_span)
if validated_answers and not pandas.isnull(validated_answers):
            # pick the validated answer with the most votes
            # (max() keeps the first key encountered in case of a tie)
validated_answers_dict = json.loads(validated_answers)
answer_span = max(validated_answers_dict,
key=validated_answers_dict.get)
else:
# fall back and pick the candidate answer that
# occurs most frequently.
answer_span = mode(candidate_answers)[0][0]
if (answer_span.lower() == "none" or answer_span.lower() == "bad_question" or
answer_absent_prob >= 0.5 or passage_bad_prob >= 0.5):
continue
initial_span_start, initial_span_end = [int(x) for x in
answer_span.split(":")]
if not raw_passage_text[initial_span_start:initial_span_end][-1].isalnum():
initial_span_end -= 1
raw_answer_snippet = raw_passage_text[:initial_span_start]
# count the number of spaces to add before the answer (newlines following non-newline)
num_spaces_added = len(re.findall("(?<=[^\\n|\\r])(\\n|\\r)",
raw_answer_snippet))
# count the number of newlines that we're going to remove
# before the answer (all newlines before the answer)
num_newlines_removed = len(re.findall("(\\r|\\n)", raw_answer_snippet))
# offset refers to how much to shift the span by
offset = (num_newlines_removed) - num_spaces_added
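        # worked example: if the text before the answer is "abc\r\ndef", the
        # "\r\n" run is later collapsed to a single space, so
        # num_newlines_removed = 2, num_spaces_added = 1 and offset = 1,
        # i.e. the answer span shifts left by one character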
# remove consecutive newlines with spaces in the raw passage text
# to get a clean version with no linebreaks
processed_passage_text = re.sub("(\\r|\\n)+", " ", raw_passage_text)
        # calculate the new span indices by subtracting the previously calculated offset
final_span_start = initial_span_start - offset
final_span_end = initial_span_end - offset
# build the new row of the dataset
# question text
clean_row.append(row[1])
# label
clean_row.append(str(final_span_start) + ":" + str(final_span_end))
# answer as a string
clean_row.append(processed_passage_text[final_span_start:final_span_end])
# passage text
clean_row.append(processed_passage_text)
clean_rows.append(clean_row)
# turn the list of rows into a dataframe, and write to CSV
dataframe = pandas.DataFrame(clean_rows, columns=clean_headers)
folder, filename = os.path.split(newsqa_file_path)
outdirectory = folder + "/cleaned/"
os.makedirs(outdirectory, exist_ok=True)
outpath = outdirectory + filename + ".clean"
logger.info("Saving cleaned file to %s", outpath)
dataframe.to_csv(outpath, encoding="utf-8", index=False)
if __name__ == '__main__':
main()
|
apache-2.0
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/scipy/signal/spectral.py
|
28
|
34979
|
"""Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0018156616014838548
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hann' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
--------
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hann' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
    # Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode=mode)
return freqs, time, Pxy
def coherence(x, y, fs=1.0, window='hann', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
--------
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hann' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving average
filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
                      'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
"""
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    _spectral helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating data?",
http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
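        # each segment starts 'step' samples after the previous one; as_strided
        # builds a (..., n_segments, nperseg) view that shares memory with x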
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
|
gpl-3.0
|
lorisercole/thermocepstrum
|
thermocepstrum/utils/blocks.py
|
1
|
21043
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, math
import thermocepstrum as tc
from thermocepstrum.utils import log
abs_path = os.path.abspath(sys.argv[0])
tc_path = abs_path[:abs_path.rfind('/')]
tc_path = tc_path[:tc_path.rfind('/')]
sys.path.append(tc_path[:tc_path.rfind('/')])
log.write_log(tc_path)
import numpy as np
import scipy as sp
import scipy.stats
import matplotlib
#matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.backends.backend_pdf import PdfPages
plt.style.reload_library()
try:
plt.style.use('./plot_style.mplstyle')
except:
plt.style.use(tc_path + 'utils/plot_style.mplstyle')
c = plt.rcParams['axes.prop_cycle'].by_key()['color']
try:
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
except:
log.write_log('Error: cannot import inset_axes (will not be able to plot some parts of the plots)')
def main():
usage = """usage: {} output N_currents N_processes DT_FS max_THz nyquist k_SI_max zoom_max_THz zoom_k_SI_max input1 input2 ... inputN
N must be at least 2.
This utility computes some histograms and statistics of the outputs generated by the analysis program.
N_processes is the number of independent processes used, and N_currents is the number of currents in every random process.
DT_FS is the timestep in femtoseconds.
Riccardo Bertossa (SISSA), 2018
""".format(sys.argv[0])
#number of elements of the periodogram used in making the big histogram
all_cut = 1500
if len(sys.argv) < 8:
log.write_log(usage)
exit(-1)
output = sys.argv[1]
M = int(sys.argv[2])
L = int(sys.argv[3])
dof = L - M + 1
log.write_log(dof, ' degrees of freedom for the chi2 distribution')
DT_FS = float(sys.argv[4])
max_THz = float(sys.argv[5])
nyq = float(sys.argv[6])
k_SI_max = float(sys.argv[7])
zmax_THz = float(sys.argv[8])
zk_SI_max = float(sys.argv[9])
ff = 10
ndata = len(sys.argv) - ff
log.write_log('Number of inputs: {}\n reading...'.format(ndata))
periodograms = []
cospectrums = []
cepstrals = []
aic_Kmins = np.zeros(ndata)
kappas_Kmin = np.zeros(ndata)
kappas_Kmin_std = np.zeros(ndata)
kappa_scales = np.zeros(ndata)
l0s = np.zeros(ndata)
l0s_std = np.zeros(ndata)
if os.path.isfile(sys.argv[ff] + '.psd.npy'):
freqs = np.load(sys.argv[ff] + '.psd.npy')[0]
else:
freqs = np.loadtxt(sys.argv[ff] + '.psd.dat', usecols=(0,), unpack=True)
cont = 0
for fname in sys.argv[ff:]:
if os.path.isfile(fname + '.psd.npy'):
periodograms.append(np.load(fname + '.psd.npy')[3:5])
else:
periodograms.append(np.loadtxt(fname + '.psd.dat', usecols=(3, 4), unpack=True))
if periodograms[-1].shape != periodograms[0].shape:
            log.write_log(fname, ' not used (inconsistent shape with first element)', periodograms[-1].shape,
periodograms[0].shape)
del periodograms[-1]
continue
if os.path.isfile(fname + '.cepstral.npy'):
cepstrals.append(np.load(fname + '.cepstral.npy')[[4, 5, 2, 3]])
else:
cepstrals.append(np.loadtxt(fname + '.cepstral.dat', usecols=(4, 5), unpack=True))
if os.path.isfile(fname + '.cospectrum.npy'):
cospectrums.append(np.load(fname + '.cospectrum.npy')[1] / L)
elif os.path.isfile(fname + '.cospectrum.dat'):
            cospectrums.append(np.loadtxt(fname + '.cospectrum.dat')[1] / L)
if len(cospectrums) == 0:
cospectrums = None
fka = open(fname + '.kappa_scale_aicKmin.dat')
kappa_scales[cont] = float(fka.readline())
aic_Kmin = int(fka.readline())
aic_Kmins[cont] = aic_Kmin
fka.close()
kappas_Kmin[cont] = cepstrals[cont][0, aic_Kmin]
kappas_Kmin_std[cont] = cepstrals[cont][1, aic_Kmin]
l0s[cont] = cepstrals[cont][2, aic_Kmin]
l0s_std[cont] = cepstrals[cont][3, aic_Kmin]
log.write_log(fname, periodograms[cont].shape, cepstrals[cont].shape)
cont += 1
aic_KminM = np.mean(aic_Kmins)
aic_Kmin = int(aic_KminM)
log.write_log('Reading done.')
#resizing and creating a big numpy array.
#for periodogram,cepstral in zip(periodograms,cepstrals):
for i in range(1, len(periodograms)):
periodograms[i].resize(periodograms[0].shape)
cepstrals[i].resize(cepstrals[0].shape)
# *this does not work when using a lot of data
#import pdb; pdb.set_trace()
#log.write_log(periodograms[0].shape)
#for i in range(1,len(periodograms)):
# log.write_log i
# periodograms[i]=np.resize(periodograms[i],periodograms[0].shape)
# cepstrals[i]=np.resize(cepstrals[i],cepstrals[0].shape)
cepstrals = np.array(cepstrals, copy=False)
periodograms = np.array(periodograms, copy=False)
    # text log file (kept under a separate name so it does not shadow the imported 'log' module)
    logfile = open(output + '.log', 'w')
    logfile.write('Mean value of kappa_scale: {}\n'.format(np.mean(kappa_scales)))
    logfile.write('Mean value of minimum of AIC: {}\n'.format(aic_KminM))
    logfile.write('\n')
    logfile.write('Mean value and standard deviation of kappa(aic_Kmin): {} +/- {}\n'.format(
        np.mean(kappas_Kmin), np.std(kappas_Kmin)))
    logfile.write('Mean value of calculated statistical error per block of kappa(aic_Kmin): {}\n'.format(
        np.mean(kappas_Kmin_std)))
    logfile.write('\n')
    logfile.write('Mean value and standard deviation of L0(aic_Kmin): {} +/- {}\n'.format(
        np.mean(l0s), np.std(l0s)))
    logfile.write('Mean value of calculated statistical error per block of L0(aic_Kmin): {}\n'.format(np.mean(l0s_std)))
    logfile.write('\n\n===========')
    logfile.write('Values at mean value of aic_Kmin:\n')
    logfile.write('\n')
    logfile.write('Mean value and standard deviation of kappa(aic_KminM): {} +/- {}\n'.format(
        np.mean(cepstrals[:, 0, aic_Kmin]), np.std(cepstrals[:, 0, aic_Kmin])))
    logfile.write('Mean value of calculated statistical error per block of kappa(aic_Kmin): {}\n'.format(
        np.mean(cepstrals[:, 1, aic_Kmin])))
    logfile.write('\n')
    logfile.write('Mean value and standard deviation of L0(aic_Kmin): {} +/- {}\n'.format(
        np.mean(cepstrals[:, 2, aic_Kmin]), np.std(cepstrals[:, 2, aic_Kmin])))
    logfile.write('Mean value of calculated statistical error per block of L0(aic_Kmin): {}\n'.format(
        np.mean(cepstrals[:, 3, aic_Kmin])))
    logfile.close()
#calculate statistics over first axis
std_periodogram = np.std(periodograms, axis=0, ddof=1)
std_cepstral = np.std(cepstrals, axis=0, ddof=1)
mean_periodogram = np.mean(periodograms, axis=0)
mean_cospectrum = None
    if cospectrums is not None:
mean_cospectrum = np.mean(cospectrums, axis=0)
mean_cepstral = np.mean(cepstrals, axis=0)
log.write_log(mean_cepstral.shape)
log.write_log(mean_periodogram.shape)
np.savetxt(output + '.mean_periodogram',
np.c_[freqs, mean_periodogram[0], std_periodogram[0], mean_periodogram[1], std_periodogram[1]])
np.savetxt(output + '.mean_cepstral', np.c_[mean_cepstral[0], std_cepstral[0], mean_cepstral[1], std_cepstral[1]])
log.write_log('Mean values and standard deviations done.')
    log.write_log('Computing index of .10 cumulative psd power...')
psd_int = np.cumsum(mean_periodogram[0])
psd_int = psd_int / psd_int[-1]
p95 = 0
while psd_int[p95] < 0.10:
p95 = p95 + 1
all_cut = p95
all_cut = 100
#select only components that are significantly different from zero
selection_not_zero = []
zero = mean_periodogram[0, 0] / 10
for i in range(all_cut):
if mean_periodogram[0, i] > zero:
selection_not_zero.append(i)
log.write_log('Number of components > {}: {}. Last is {}'.format(zero, len(selection_not_zero),
selection_not_zero[-1]))
# log.write_log(selection_not_zero)
log.write_log('Index = {} , {} THz'.format(p95, freqs[p95]))
log.write_log('Some plots...')
#make some plots and histograms
with PdfPages(output + '_all.pdf') as pdf:
plt.fill_between(freqs, mean_periodogram[0] - std_periodogram[0], mean_periodogram[0] + std_periodogram[0])
plt.plot(freqs, mean_periodogram[0])
plt.title('Original PSD')
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
class Psd:
psd = None
mpsd = None
fpsd = None
freqs_THz = None
kappa_scale = None
cospectrum = None
mcospectrum = None
ucospectrum = None
DT_FS = None
def ffpsd(self, w, single=False):
WF = int(round(w / 1000. * self.DT_FS * len(self.freqs_THz) * 2.))
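                # convert the filter width w (given in THz) into an equivalent number of
                # PSD points: the Nyquist frequency is 500/DT_FS THz, so
                # n_points = w/nyquist * len(freqs_THz) = w/1000 * DT_FS * len(freqs_THz) * 2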
log.write_log('filtering: ', WF)
if not single:
ffpsd = tc.md.tools.filter.runavefilter(self.mpsd, WF)
else:
ffpsd = tc.md.tools.filter.runavefilter(self.psd, WF)
self.fpsd = ffpsd
try:
for i in range(self.ucospectrum.shape[0]):
for j in range(self.ucospectrum.shape[1]):
if not single:
ffc = tc.md.tools.filter.runavefilter(self.mcospectrum[i, j], WF)
else:
ffc = tc.md.tools.filter.runavefilter(self.ucospectrum[i, j], WF)
self.cospectrum[i, j] = ffc
except AttributeError:
pass
psd = Psd()
psd.DT_FS = DT_FS
psd.kappa_scale = np.mean(kappa_scales) / DT_FS
psd.freqs_THz = freqs
plot_idx = 2
psd.psd = periodograms[plot_idx, 0, :]
psd.mpsd = mean_periodogram[0, :]
psd.fpsd = np.copy(mean_periodogram[0, :])
psd.ucospectrum = cospectrums[plot_idx]
psd.mcospectrum = mean_cospectrum
psd.cospectrum = np.copy(mean_cospectrum)
plt_psd(psd, k_00=True, f_THz_max=max_THz, nyq=nyq, k_SI_max=k_SI_max)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt_psd_with_zoom(psd, k_00=True, f_THz_max=max_THz, nyq=nyq, k_SI_max=k_SI_max, inset_maxTHz=zmax_THz,
inset_maxk=zk_SI_max)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt.xlim([0, max_THz])
plt.plot(psd.freqs_THz, np.real(psd.cospectrum[1, 0]))
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt.xlim([0, max_THz])
plt.plot(psd.freqs_THz, psd.cospectrum[1, 1])
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
psd.ffpsd(0.5, single=True)
plt_psd(psd, k_00=True, f_THz_max=max_THz, nyq=nyq, k_SI_max=k_SI_max)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt_psd_with_zoom(psd, k_00=True, f_THz_max=max_THz, nyq=nyq, k_SI_max=k_SI_max, inset_maxTHz=zmax_THz,
inset_maxk=zk_SI_max)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt.xlim([0, max_THz])
plt.plot(psd.freqs_THz, np.real(psd.cospectrum[1, 0]))
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt.xlim([0, max_THz])
plt.plot(psd.freqs_THz, psd.cospectrum[1, 1])
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
psd.ffpsd(0.2, single=False)
plt_psd(psd, k_00=True, f_THz_max=max_THz, nyq=nyq, k_SI_max=k_SI_max)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt_psd_with_zoom(psd, k_00=True, f_THz_max=max_THz, nyq=nyq, k_SI_max=k_SI_max, inset_maxTHz=zmax_THz,
inset_maxk=zk_SI_max)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt.xlim([0, max_THz])
plt.plot(psd.freqs_THz, np.real(psd.cospectrum[1, 0]))
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
plt.xlim([0, max_THz])
plt.plot(psd.freqs_THz, psd.cospectrum[1, 1])
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
#make histogram for component 0 and 1 of psd
#compute means without i-th element
independent_mean = np.zeros((periodograms.shape[0], periodograms.shape[2]))
for i in range(periodograms.shape[0]):
all_but_ith = [x for x in range(periodograms.shape[0]) if x != i]
independent_mean[i, :] = np.mean(periodograms[all_but_ith, 0, :], axis=0)
plt.hist(kappas_Kmin)
plt.title('kappa(aic-Kmin)')
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
data1 = periodograms[:, 0, 0] / independent_mean[:, 0]
ks__0 = plt_hist_single_psd(data1, dof)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
data2 = periodograms[:, 0, 1] / independent_mean[:, 1]
ks__1 = plt_hist_single_psd(data2, dof * 2)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
#ks_1=sp.stats.kstest(data2*2*dof,sp.stats.chi2(2*dof).cdf)
#make histogram for all components normalized
all_normalized = np.zeros(periodograms.shape[0] * (len(selection_not_zero)))
for i in range(periodograms.shape[0]):
#for idx,i in enumerate(selection_not_zero):
all_normalized[i * len(selection_not_zero):(i + 1) * len(selection_not_zero)] = \
    periodograms[i, 0, selection_not_zero] / independent_mean[i, selection_not_zero]
ks_all = plt_hist_single_psd(all_normalized, dof * 2, nbins=100)
pdf.savefig(bbox_inches='tight', pad_inches=0.0)
plt.close()
# np.savetxt(output+'.histogram_all',np.c_[(intervals[1:]+intervals[:-1])/2.0,histogram/np.sum(histogram)])
# log.write_log('Histogram bin width: {}'.format(intervals[1]-intervals[0]))
# np.savetxt(output+'.kolmogorov_smirnov',[ks_0,ks_1,ks_all])
log.write_log('Statistical test results (psd(0), psd(1), psd(all but 0)): {}'.format([ks__0, ks__1, ks_all]))
#make graphs of mean of theoretical and statistical error of the final result
plt.fill_between(np.arange(mean_cepstral.shape[1]), mean_cepstral[0] - std_cepstral[0],
mean_cepstral[0] + std_cepstral[0])
plt.plot(np.arange(mean_cepstral.shape[1]), mean_cepstral[0] - mean_cepstral[1])
plt.plot(np.arange(mean_cepstral.shape[1]), mean_cepstral[0] + mean_cepstral[1])
plt.plot(np.arange(mean_cepstral.shape[1]), mean_cepstral[0])
plt.title('Convergence of cepstral result with theoretical and statistical errors')
plt.xlim([0, 10 * aic_Kmin])
max_y = np.amax((mean_cepstral[0] + std_cepstral[0])[aic_Kmin:3 * aic_Kmin])
min_y = np.amin((mean_cepstral[0] - std_cepstral[0])[aic_Kmin:3 * aic_Kmin])
plt.ylim([min_y * 0.8, max_y * 1.2])
pdf.savefig(bbox_inches='tight')
plt.close()
def plt_hist_single_psd(data1, dof, nbins=None):
fig = plt.figure(figsize=(3.8, 2.3))
if nbins is not None:
h, i, p = plt.hist(data1 * dof, bins=nbins, normed=True)
else:
h, i, p = plt.hist(data1 * dof, normed=True)
xmax = i[-1]
ymax = np.max(h) * 1.2
plt.xlim([0.0, xmax])
plt.ylim([0.0, ymax])
x = np.linspace(0.0, xmax, 1000)
plt.plot(x, sp.stats.chi2.pdf(x, dof), ls=':', label=r'$\chi^2_{{{}}}$'.format(dof))
plt.xlabel('Normalized values')
plt.ylabel('Probability')
plt.legend()
dx1, dx2 = n_tick_in_range(0, xmax, 5)
dy1, dy2 = n_tick_in_range(0, ymax, 5)
plt.gca().xaxis.set_major_locator(MultipleLocator(dx1))
plt.gca().xaxis.set_minor_locator(MultipleLocator(dx2))
plt.gca().yaxis.set_major_locator(MultipleLocator(dy1))
plt.gca().yaxis.set_minor_locator(MultipleLocator(dy2))
ks_0 = sp.stats.kstest(data1 * dof, sp.stats.chi2(dof).cdf)
text = 'KS-value=${}$\nP=${}$'.format(as_si(ks_0[0], 1), as_si(ks_0[1], 1))
plt.text(.5, .6, text, transform=fig.transFigure)
return ks_0
def plt_psd_with_zoom(jf, j2=None, j2pl=None, f_THz_max=None, k_SI_max=None, k_00=False, nyq=None, inset_maxTHz=None,
inset_maxk=None):
#plt.axes([0,1,0,1])
fig_r, ax0 = plt_psd(jf, j2, j2pl, f_THz_max, k_SI_max, k_00, nyq)
coord_f = [0.23, 0.55, 0.3, 0.3]
ax = fig_r.add_axes(coord_f)
inv = fig_r.transFigure # + ax0.transData.inverted()
f_x = 0.72
f_x2 = 1.25
f_y = 0.87
f_y2 = 1.35
log.write_log(inv.transform((coord_f[0] * f_x, coord_f[1] * f_y)))
ax0.add_patch(
matplotlib.patches.Rectangle((coord_f[0] * f_x, coord_f[1] * f_y), coord_f[2] * f_x2, coord_f[3] * f_y2,
fill=True, color='White', visible=True, transform=inv))
#plt.box()
plt_psd(jf, j2, j2pl, inset_maxTHz, inset_maxk, k_00, nyq, False, axes=ax)
def plt_psd(jf, j2=None, j2pl=None, f_THz_max=None, k_SI_max=None, k_00=False, nyq=None, plt_figure=True, axes=None):
if f_THz_max is None:
idx_max = index_cumsum(jf.psd, 0.95)
f_THz_max = jf.freqs_THz[idx_max]
if k_SI_max is None:
k_SI_max = np.max(
jf.fpsd[:int(jf.freqs_THz.shape[0] * f_THz_max / jf.freqs_THz[-1])] * jf.kappa_scale * .5) * 1.3
if k_00:
try:
k_SI_max2 = np.max(
np.real(jf.cospectrum[0, 0][:int(jf.freqs_THz.shape[0] * f_THz_max / jf.freqs_THz[-1])]) *
jf.kappa_scale * .5) * 1.3
if k_SI_max < k_SI_max2:
k_SI_max = k_SI_max2
except AttributeError:
pass
fig_r = None
if plt_figure:
fig_r = plt.figure(figsize=(3.4, 2.0))
if axes is None:
axes = plt.gca()
axes.plot(jf.freqs_THz, jf.psd * jf.kappa_scale * .5, lw=0.2, c='0.8', zorder=0)
axes.plot(jf.freqs_THz, jf.fpsd * jf.kappa_scale * .5, c=c[0], zorder=2)
if j2 is not None:
axes.axvline(x=j2.Nyquist_f_THz, ls='--', c='k', dashes=(1.4, 0.6), zorder=3)
if j2pl is not None:
axes.plot(j2pl.freqs_THz, j2pl.dct.psd * j2pl.kappa_scale * .5, c=c[2])
try:
axes.plot(jf.freqs_THz, np.real(jf.cospectrum[0, 0]) * jf.kappa_scale * .5, c=c[3], lw=1.0, zorder=1)
except AttributeError:
pass
axes.set_ylim(0, k_SI_max)
axes.set_xlim(0, f_THz_max)
if plt_figure:
axes.set_xlabel(r'$\omega/2\pi$ (THz)')
# axes.set_ylabel('$^{\ell M}\widehat{S}\'_{\,k}$ (W/mK)')
axes.set_ylabel(r'W/(m$\,$K)')
if nyq is not None and nyq < f_THz_max:
    idxnyq = int(nyq / jf.freqs_THz[-1] * jf.freqs_THz.size)
    axes.annotate('', xy=(nyq, (k_SI_max-jf.fpsd[idxnyq]*jf.kappa_scale*.5)/7+jf.fpsd[idxnyq]*jf.kappa_scale*.5), \
                  xytext=(nyq, (k_SI_max-jf.fpsd[idxnyq]*jf.kappa_scale*.5)/7+jf.fpsd[idxnyq]*jf.kappa_scale*.5+k_SI_max/7.0), \
                  arrowprops={'width': 1.0, 'headwidth': 3.0, 'headlength': 7, 'color': 'k'})
ntick = 5
if not plt_figure:  # axes is always set by this point; use fewer ticks only for the inset call
    ntick = 3
dx1, dx2 = n_tick_in_range(0, f_THz_max, ntick)
dy1, dy2 = n_tick_in_range(0, k_SI_max, ntick)
#dx1=10
#dx2=5
axes.xaxis.set_major_locator(MultipleLocator(dx1))
axes.xaxis.set_minor_locator(MultipleLocator(dx2))
axes.yaxis.set_major_locator(MultipleLocator(dy1))
axes.yaxis.set_minor_locator(MultipleLocator(dy2))
return fig_r, axes
def n_tick_in_range(beg, end, n, n_c=1, nit=0):
size = end - beg
dx0 = (end - beg) / n
e = 10**(math.ceil(math.log10(dx0)))
m = dx0 / e
cifre0 = math.ceil(m * 10**(n_c))
cifre = cifre0 - cifre0 % 5
if cifre == 0:
cifre = 1.0
delta = cifre * e / 10**(n_c)
#log.write_log("n=",n, " dx0=",dx0," e=",e," m=" ,m," cifre=", cifre)
if nit < 30:
if delta >= size:
return n_tick_in_range(beg, end, n + 1, n_c, nit + 1)
if (end - beg) / delta > n and n > 1:
return n_tick_in_range(beg, end, n - 1, n_c, nit + 1)
return delta, delta / 2
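# For instance, n_tick_in_range(0.0, 47.0, 5) returns (10.0, 5.0): major ticks every
# 10 and minor ticks every 5 units over a 0-47 axis.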
def index_cumsum(arr, p):
if (p > 1 or p < 0):
raise ValueError('p must be between 0 and 1')
arr_int = np.cumsum(arr)
arr_int = arr_int / arr_int[-1]
idx = 0
while arr_int[idx] < p:
idx = idx + 1
return idx
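# For instance, index_cumsum(psd, 0.95) returns the first index at which the
# normalized cumulative sum of psd reaches 95% of its total.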
def as_si(x, ndp):
s = '{x:0.{ndp:d}e}'.format(x=x, ndp=ndp)
try:
m, e = s.split('e')
except ValueError:
return r'0\times 10^{\infty}'
return r'{m:s}\times 10^{{{e:d}}}'.format(m=m, e=int(e))
if __name__ == '__main__':
main()
|
gpl-3.0
|
rallured/PyXFocus
|
examples/axro/stressCompensation.py
|
1
|
3389
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import pdb,os,pyfits
import utilities.imaging.fitting as fit
import traces.sources as sources
import traces.transformations as tran
import traces.surfaces as surf
import traces.analyses as anal
#Get distortion coefficients
os.chdir('/home/rallured/Dropbox/AXRO/Alignment/AlignmentStressCompensation/')
res = pyfits.getdata('PrimaryCoeffs.fits')
res[0][0] = 0.
res2 = pyfits.getdata('SecondaryCoeffs.fits')
res2[0][0] = 0.
def findDistortionCoefficients(filename,Nx,Ny,method='cubic'):
"""
Fit L-L coefficients to distortion data produced by Vanessa.
"""
#Load in data
d = np.transpose(np.genfromtxt(filename,skip_header=1,delimiter=','))
#Compute angle and radial perturbations
x0,y0,z0 = d[2:5]
t0 = np.arctan2(x0,-z0)
r0 = 1 - np.sqrt(x0**2+z0**2)
#Define regular grid
tg,zg = np.meshgrid(np.linspace(t0.min(),t0.max(),Nx+2),\
np.linspace(y0.min(),y0.max(),Ny+2))
#Regrid nominal node positions
d0 = griddata((t0,y0),r0,(tg,zg),method=method)
#Find perturbed node positions
x1,y1,z1 = x0+d[5],y0+d[6],z0+d[7]
t1 = np.arctan2(x1,-z1)
r1 = 1 - np.sqrt(x1**2+z1**2)
#Regrid distorted nodes onto original grid
d1 = griddata((t1,y1),r1,(tg,zg),method=method)
#Get distortion data
#
d = d1-d0
#Apply fit
res = fit.legendre2d(d,xo=10,yo=10)
xo,yo = np.meshgrid(np.arange(11),np.arange(11))
pdb.set_trace()
#Return coefficients and order arrays
return res[1].flatten(),xo.flatten(),yo.flatten()
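#Hypothetical usage (file name and grid size are illustrative; the CSV layout is the
#one parsed above: node coordinates followed by displacements):
# coeff, xo, yo = findDistortionCoefficients('PrimaryDistortion.csv', 10, 10)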
def pairRaytrace(secondaryTilt,despace):
"""Trace the distorted mirror pair. Assume no gap for now.
Vignette rays that land outside active mirror areas."""
#Define ray subannulus
r1 = surf.con.primrad(8600.,1000.,8400.)
ang = 260./1000. #arc length over radius is angular extent
rays = sources.subannulus(1000.,r1,ang,10**3)
tran.transform(rays,0,0,0,np.pi,0,0) #Point in -z
tran.transform(rays,0,0,-10000,0,0,0) #Converge from above
#Trace to primary
surf.primaryLL(rays,1000.,8400.,8600,8400,ang,res[0],res[2],res[1])
#Vignette rays missing
ind = np.logical_and(rays[3]<8600.,rays[3]>8400.)
rays = tran.vignette(rays,ind)
numin = float(len(rays[1]))
#Reflect
tran.reflect(rays)
#Bring to midplane of despaced secondary
#Apply secondary misalignment
tran.transform(rays,surf.con.secrad(8300.,1000.,8400.),0,8300,0,0,0)
tran.transform(rays,0,0,despace,0,secondaryTilt,0)
tran.itransform(rays,surf.con.secrad(8300.,1000.,8400.),0,8300,0,0,0)
#Trace to secondary
surf.secondaryLL(rays,1000.,8400.,8400.,8200.,ang,res2[0],res2[2],res2[1])
#Vignette rays missing
ind = np.logical_and(rays[3]<8400.,rays[3]>8200.)
rays = tran.vignette(rays,ind)
numout = float(len(rays[1]))
#Reflect
tran.reflect(rays)
#Reverse secondary misalignment
tran.transform(rays,surf.con.secrad(8300.,1000.,8400.),0,8300,0,0,0)
tran.itransform(rays,0,0,despace,0,secondaryTilt,0)
tran.itransform(rays,surf.con.secrad(8300.,1000.,8400.),0,8300,0,0,0)
#Go to focus
surf.focusI(rays)
#Get centroid
cx,cy = anal.centroid(rays)
#Return merit function
return anal.rmsCentroid(rays)/8400.*180/np.pi*60**2,numout/numin,cx,cy
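#Hypothetical usage: trace the pair with a small secondary tilt and despace
#(tilt assumed in radians and despace in mm, following the transform calls above):
# rms_arcsec, throughput, cx, cy = pairRaytrace(1e-4, .5)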
|
mit
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/sklearn/decomposition/tests/test_nmf.py
|
21
|
17922
|
import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu',
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd',
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('cd', 'mu'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_nmf_fit_close():
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
for solver in ('cd', 'mu'):
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
max_iter=600)
X = np.abs(rng.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
for solver in ['cd', 'mu']:
m = NMF(solver=solver, n_components=3, init='random',
random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('cd', 'mu'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('cd', 'mu'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('cd', 'mu'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
assert_no_warnings(nnmf, A, A, A, np.int64(1))
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
if isinstance(X, numbers.Number):
W = np.array([[W]])
H = np.array([[H]])
X = np.array([[X]])
WH = np.dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
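# For reference, the element-wise beta-divergence summed above is, for beta not in {0, 1},
#   d_beta(x, y) = (x**beta + (beta - 1) * y**beta - beta * x * y**(beta - 1)) / (beta * (beta - 1)),
# with the Kullback-Leibler (beta=1) and Itakura-Saito (beta=0) divergences as limiting cases.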
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
# Test that the output has not NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, n_components=n_components, solver='mu', beta_loss=beta_loss,
random_state=0, max_iter=1000)
assert_false(np.any(np.isnan(W)))
assert_false(np.any(np.isnan(H)))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert_greater(W_regul_n_zeros, W_model_n_zeros)
assert_greater(H_regul_n_zeros, H_model_n_zeros)
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert_greater(W_model.mean(), W_regul.mean())
assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert_greater(previous_loss, loss)
previous_loss = loss
|
mit
|
modsy/incubator-airflow
|
airflow/contrib/hooks/bigquery_hook.py
|
3
|
31510
|
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import logging
import time
from airflow.contrib.hooks.gc_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from apiclient.discovery import build
from pandas.io.gbq import GbqConnector, _parse_data as gbq_parse_data
from pandas.tools.merge import concat
logging.getLogger("bigquery").setLevel(logging.INFO)
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
"""
Interact with BigQuery. Connections must be defined with an extras JSON
field containing:
{
"project": "<google project ID>",
"service_account": "<google service account email>",
"key_path": "<p12 key path>"
}
If you have used ``gcloud auth`` to authenticate on the machine that's
running Airflow, you can exclude the service_account and key_path
parameters.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
scope='https://www.googleapis.com/auth/bigquery',
bigquery_conn_id='bigquery_default',
delegate_to=None):
"""
:param scope: The scope of the hook.
:type scope: string
"""
super(BigQueryHook, self).__init__(
scope=scope,
conn_id=bigquery_conn_id,
delegate_to=delegate_to)
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
connection_extras = self._extras_dejson()
project = connection_extras['project']
return BigQueryConnection(service=service, project_id=project)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, bql, parameters=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param bql: The BigQuery SQL to execute.
:type bql: string
"""
service = self.get_service()
connection_extras = self._extras_dejson()
project = connection_extras['project']
connector = BigQueryPandasConnector(project, service)
schema, pages = connector.run_query(bql, verbose=False)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(gbq_parse_data(schema, page))
if len(dataframe_list) > 0:
return concat(dataframe_list, ignore_index=True)
else:
return gbq_parse_data(schema, [])
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self, project_id, service, reauth=False):
self.test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(object):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id):
self.service = service
self.project_id = project_id
def run_query(
self, bql, destination_dataset_table=False,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
udf_config=False):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: The BigQuery SQL to execute.
:type bql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
"""
configuration = {
'query': {
'query': bql,
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_dataset, destination_table = \
destination_dataset_table.split('.', 1)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'writeDisposition': write_disposition,
'destinationTable': {
'projectId': self.project_id,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
return self.run_with_configuration(configuration)
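# Illustrative call (dataset and table names are hypothetical; cursor is a BigQueryBaseCursor):
#   job_id = cursor.run_query('SELECT x FROM mydataset.src',
#                             destination_dataset_table='mydataset.dst',
#                             write_disposition='WRITE_TRUNCATE')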
def run_extract( # noqa
self, source_project_dataset_table, destination_cloud_storage_uris,
compression='NONE', export_format='CSV', field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
self._split_project_dataset_table_input(
'source_project_dataset_table', source_project_dataset_table)
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(<project>.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: <project>.<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
self._split_project_dataset_table_input(
'source_project_dataset_table', source_project_dataset_table)
source_project_dataset_tables_fixup.append({
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table
})
assert 3 == len(destination_project_dataset_table.split('.')), (
'Expected destination_project_dataset_table in the format of '
'<project>.<dataset>.<table>. '
'Got: {}').format(destination_project_dataset_table)
destination_project, destination_dataset, destination_table = \
destination_project_dataset_table.split('.', 2)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
schema_fields, source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=','):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.)<dataset>.<table> BigQuery table to load data into.
If <project> is not included, project will be the project defined in
the connection json.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format of the data to load.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
"""
destination_project, destination_dataset, destination_table = \
self._split_project_dataset_table_input(
'destination_project_dataset_table', destination_project_dataset_table)
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'schema': {
'fields': schema_fields
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
}
}
if source_format == 'CSV':
configuration['load']['skipLeadingRows'] = skip_leading_rows
configuration['load']['fieldDelimiter'] = field_delimiter
return self.run_with_configuration(configuration)
def _split_project_dataset_table_input(self, var_name, project_dataset_table):
"""
:param var_name: the name of the variable input, for logging and erroring purposes.
:type var_name: str
:param project_dataset_table: input string in (<project>.)<dataset>.<table> format.
if project is not included in the string, self.project_id will be returned in the tuple.
:type project_dataset_table: str
:return: (project, dataset, table) tuple
"""
table_split = project_dataset_table.split('.')
assert len(table_split) == 2 or len(table_split) == 3, (
'Expected {var} in the format of (<project>.)<dataset>.<table>, '
'got {input}').format(var=var_name, input=project_dataset_table)
if len(table_split) == 2:
logging.info('project not included in {var}: {input}; using project "{project}"'.format(var=var_name, input=project_dataset_table, project=self.project_id))
dataset, table = table_split
return self.project_id, dataset, table
else:
project, dataset, table = table_split
return project, dataset, table
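# e.g. 'proj.ds.tbl' -> ('proj', 'ds', 'tbl'), while 'ds.tbl' -> (self.project_id, 'ds', 'tbl')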
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {
'configuration': configuration
}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
job_id = query_reply['jobReference']['jobId']
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Wait for query to finish.
while not job['status']['state'] == 'DONE':
logging.info('Waiting for job to complete: %s, %s', self.project_id, job_id)
time.sleep(5)
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
    'BigQuery job failed. Final error was: {}'.format(job['status']['errorResult']))
return job_id
def get_schema(self, dataset_id, table_id):
"""
Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, page_token=None, start_index=None):
"""
Get the data of a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (
self.service.tabledata()
.list(
projectId=self.project_id, datasetId=dataset_id,
tableId=table_id, **optional_params)
.execute()
)
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
Creates a new, empty table in the dataset;
if the table already exists, updates the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
table_exists = False
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(projectId=project_id,
datasetId=dataset_id).execute()
if 'tables' in tables_list_resp:
for table in tables_list_resp['tables']:
if table['tableReference']['tableId'] == table_id:
table_exists = True
break
# do update if table exists
if table_exists:
logging.info('table %s:%s.%s exists, updating.', project_id, dataset_id, table_id)
return self.service.tables().update(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# do insert if table does not exist
else:
logging.info('table %s:%s.%s does not exist. creating.', project_id, dataset_id, table_id)
return self.service.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_project,
source_dataset,
view_project,
view_dataset,
view_table):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_project: the project of the source dataset
:type source_project: str
:param source_dataset: the source dataset
:type source_dataset: str
:param view_project: the project that the view is in
:type view_project: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:return: the datasets resource of the source dataset.
"""
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(projectId=source_project,
datasetId=source_dataset).execute()
access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
view_access = {'view': {'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table}}
# check to see if the view we want to add already exists.
if view_access not in access:
logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
access.append(view_access)
return self.service.datasets().patch(projectId=source_project,
datasetId=source_dataset,
body={'access': access}).execute()
else:
# if view is already in access, do nothing.
logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
return source_dataset_resource
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in xrange(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.iteritems():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
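# For illustration, _bind_parameters("SELECT * FROM t WHERE name = %(name)s", {'name': 'Alice'})
# yields "SELECT * FROM t WHERE name = 'Alice'"; _escape guards the quoting for values
# containing quotes or backslashes.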
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
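# For illustration: _bq_cast('3', 'INTEGER') -> 3, _bq_cast('true', 'BOOLEAN') -> True,
# _bq_cast(None, 'FLOAT') -> None; any other type is returned as the raw string.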
|
apache-2.0
|
jakobzhao/wbcrawler3
|
backup/outputGexf.py
|
1
|
1155
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Oct 26, 2012
@author: Bo Zhao
@email: [email protected]
@website: http://yenching.org
@organization: The Ohio State University
'''
# retweet type
# 1: reply
# 2: comment
# 3: reply to a comment
# 4: a reply and a comment
# status type
# 0: original
# 1: reply
# 2: comments
import sqlite3
import networkx as nx
import matplotlib.pylab as plt
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
#--------------------------------build network-----------------------------------
database = '../data/pm2.5.db'
conn = sqlite3.connect(database)  # open the SQLite database file
cursor = conn.cursor()
cursor.execute('select Id, Source_Name, Target_Name from user_edges')
edges = cursor.fetchall()
cursor.execute('select Id, Node from user_nodes')
nodes = cursor.fetchall()
conn.commit()
conn.close()
G = nx.DiGraph()
for node in nodes:
G.add_node(node[1])
for edge in edges:
G.add_edge(edge[1],edge[2])
#print G.nodes()
#nx.draw(G,node_size=60,font_size=8)
#nx.draw(G)
nx.write_gexf(G,'test2.gexf',prettyprint = True)
plt.savefig("../data/img/path2.png")
|
mit
|
wangtuanjie/airflow
|
airflow/hooks/base_hook.py
|
20
|
1812
|
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.utils import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
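# For illustration: BaseHook.get_connection('my_postgres') first checks the
# AIRFLOW_CONN_MY_POSTGRES environment variable and, if that is unset, picks one of
# the matching Connection rows at random ('my_postgres' is a hypothetical conn_id).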
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
    raise NotImplementedError()
def get_records(self, sql):
    raise NotImplementedError()
def get_pandas_df(self, sql):
    raise NotImplementedError()
def run(self, sql):
    raise NotImplementedError()
|
apache-2.0
|
mne-tools/mne-tools.github.io
|
0.21/_downloads/d6d0db28086fd1732c34960f28cf6830/plot_stats_cluster_time_frequency.py
|
18
|
4878
|
"""
=========================================================================
Non-parametric between conditions cluster statistic on single trial power
=========================================================================
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs for 2 conditions
- compute single trial power estimates
- baseline correct the power estimates (power ratios)
- compute stats to see if the power estimates are significantly different
between conditions.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332' # restrict example to one channel
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
###############################################################################
# Factor to downsample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
be used to reduce memory usage (and possibly the computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = 1.5
tfr_epochs_1 = tfr_morlet(epochs_condition_1, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :] # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :] # only 1 channel as 3D matrix
###############################################################################
# Compute statistic
# -----------------
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([epochs_power_1, epochs_power_2], out_type='mask',
n_permutations=100, threshold=threshold, tail=0)
###############################################################################
# View time-frequency plots
# -------------------------
times = 1e3 * epochs_condition_1.times # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
weights=[1, -1])
evoked_contrast.plot(axes=ax2, time_unit='s')
plt.show()
|
bsd-3-clause
|
microsoft/LightGBM
|
tests/python_package_test/test_plotting.py
|
1
|
8029
|
# coding: utf-8
import pytest
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from lightgbm.compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED
if MATPLOTLIB_INSTALLED:
import matplotlib
matplotlib.use('Agg')
if GRAPHVIZ_INSTALLED:
import graphviz
from .utils import load_breast_cancer
@pytest.fixture(scope="module")
def breast_cancer_split():
return train_test_split(*load_breast_cancer(return_X_y=True),
test_size=0.1, random_state=1)
@pytest.fixture(scope="module")
def train_data(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
return lgb.Dataset(X_train, y_train)
@pytest.fixture
def params():
return {"objective": "binary",
"verbose": -1,
"num_leaves": 3}
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed')
def test_plot_importance(params, breast_cancer_split, train_data):
X_train, _, y_train, _ = breast_cancer_split
gbm0 = lgb.train(params, train_data, num_boost_round=10)
ax0 = lgb.plot_importance(gbm0)
assert isinstance(ax0, matplotlib.axes.Axes)
assert ax0.get_title() == 'Feature importance'
assert ax0.get_xlabel() == 'Feature importance'
assert ax0.get_ylabel() == 'Features'
assert len(ax0.patches) <= 30
gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm1.fit(X_train, y_train)
ax1 = lgb.plot_importance(gbm1, color='r', title='t', xlabel='x', ylabel='y')
assert isinstance(ax1, matplotlib.axes.Axes)
assert ax1.get_title() == 't'
assert ax1.get_xlabel() == 'x'
assert ax1.get_ylabel() == 'y'
assert len(ax1.patches) <= 30
for patch in ax1.patches:
assert patch.get_facecolor() == (1., 0, 0, 1.) # red
ax2 = lgb.plot_importance(gbm0, color=['r', 'y', 'g', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax2, matplotlib.axes.Axes)
assert ax2.get_title() == ''
assert ax2.get_xlabel() == ''
assert ax2.get_ylabel() == ''
assert len(ax2.patches) <= 30
assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) # r
assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y
assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g
assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed')
def test_plot_split_value_histogram(params, breast_cancer_split, train_data):
X_train, _, y_train, _ = breast_cancer_split
gbm0 = lgb.train(params, train_data, num_boost_round=10)
ax0 = lgb.plot_split_value_histogram(gbm0, 27)
assert isinstance(ax0, matplotlib.axes.Axes)
assert ax0.get_title() == 'Split value histogram for feature with index 27'
assert ax0.get_xlabel() == 'Feature split value'
assert ax0.get_ylabel() == 'Count'
assert len(ax0.patches) <= 2
gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm1.fit(X_train, y_train)
ax1 = lgb.plot_split_value_histogram(gbm1, gbm1.booster_.feature_name()[27], figsize=(10, 5),
title='Histogram for feature @index/name@ @feature@',
xlabel='x', ylabel='y', color='r')
assert isinstance(ax1, matplotlib.axes.Axes)
title = f'Histogram for feature name {gbm1.booster_.feature_name()[27]}'
assert ax1.get_title() == title
assert ax1.get_xlabel() == 'x'
assert ax1.get_ylabel() == 'y'
assert len(ax1.patches) <= 2
for patch in ax1.patches:
assert patch.get_facecolor() == (1., 0, 0, 1.) # red
ax2 = lgb.plot_split_value_histogram(gbm0, 27, bins=10, color=['r', 'y', 'g', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax2, matplotlib.axes.Axes)
assert ax2.get_title() == ''
assert ax2.get_xlabel() == ''
assert ax2.get_ylabel() == ''
assert len(ax2.patches) == 10
assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) # r
assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y
assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g
assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b
with pytest.raises(ValueError):
lgb.plot_split_value_histogram(gbm0, 0) # was not used in splitting
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED or not GRAPHVIZ_INSTALLED,
reason='matplotlib or graphviz is not installed')
def test_plot_tree(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm.fit(X_train, y_train, verbose=False)
with pytest.raises(IndexError):
lgb.plot_tree(gbm, tree_index=83)
ax = lgb.plot_tree(gbm, tree_index=3, figsize=(15, 8), show_info=['split_gain'])
assert isinstance(ax, matplotlib.axes.Axes)
w, h = ax.axes.get_figure().get_size_inches()
assert int(w) == 15
assert int(h) == 8
@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed')
def test_create_tree_digraph(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
constraints = [-1, 1] * int(X_train.shape[1] / 2)
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, monotone_constraints=constraints)
gbm.fit(X_train, y_train, verbose=False)
with pytest.raises(IndexError):
lgb.create_tree_digraph(gbm, tree_index=83)
graph = lgb.create_tree_digraph(gbm, tree_index=3,
show_info=['split_gain', 'internal_value', 'internal_weight'],
name='Tree4', node_attr={'color': 'red'})
graph.render(view=False)
assert isinstance(graph, graphviz.Digraph)
assert graph.name == 'Tree4'
assert graph.filename == 'Tree4.gv'
assert len(graph.node_attr) == 1
assert graph.node_attr['color'] == 'red'
assert len(graph.graph_attr) == 0
assert len(graph.edge_attr) == 0
graph_body = ''.join(graph.body)
assert 'leaf' in graph_body
assert 'gain' in graph_body
assert 'value' in graph_body
assert 'weight' in graph_body
assert '#ffdddd' in graph_body
assert '#ddffdd' in graph_body
assert 'data' not in graph_body
assert 'count' not in graph_body
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed')
def test_plot_metrics(params, breast_cancer_split, train_data):
X_train, X_test, y_train, y_test = breast_cancer_split
test_data = lgb.Dataset(X_test, y_test, reference=train_data)
params.update({"metric": {"binary_logloss", "binary_error"}})
evals_result0 = {}
lgb.train(params, train_data,
valid_sets=[train_data, test_data],
valid_names=['v1', 'v2'],
num_boost_round=10,
evals_result=evals_result0,
verbose_eval=False)
ax0 = lgb.plot_metric(evals_result0)
assert isinstance(ax0, matplotlib.axes.Axes)
assert ax0.get_title() == 'Metric during training'
assert ax0.get_xlabel() == 'Iterations'
assert ax0.get_ylabel() in {'binary_logloss', 'binary_error'}
ax0 = lgb.plot_metric(evals_result0, metric='binary_error')
ax0 = lgb.plot_metric(evals_result0, metric='binary_logloss', dataset_names=['v2'])
evals_result1 = {}
lgb.train(params, train_data,
num_boost_round=10,
evals_result=evals_result1,
verbose_eval=False)
with pytest.raises(ValueError):
lgb.plot_metric(evals_result1)
gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
ax2 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None)
assert isinstance(ax2, matplotlib.axes.Axes)
assert ax2.get_title() == ''
assert ax2.get_xlabel() == ''
assert ax2.get_ylabel() == ''
|
mit
|
SGMAP-AGD/anonymisation
|
examples/equidés/Equidés.py
|
1
|
4083
|
# coding: utf-8
#!/usr/bin/env python
"""
Using the functions from the anonymizer repository, this file lets you:
1. **Import** the data from the equine (equides) registry.
2. **Clean** the variables and select those to anonymise.
3. **Anonymise** the data with a k-anonymisation procedure.
The file can be downloaded here:
https://www.data.gouv.fr/fr/datasets/fichier-des-equides/
or directly :
https://www.data.gouv.fr/s/resources/fichier-des-equides/20141201-185229/Equides.csv
The 200 MB file contains around 3 million rows.
"""
import csv
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from anonymizer.anonymity import (get_k, get_anonymities,
less_anonym_groups,
all_local_aggregation)
from anonymizer.diversity import (get_l,
get_diversities,
diversity_distribution,
less_diverse_groups
)
from anonymizer.transformations import (first_letters,
last_letters,
local_aggregation)
from anonymizer.transformations import str_drop
from anonymizer.anonymDF import AnonymDataFrame
from anonymizer.config_anonymizer import config
import os
import io
# ## I. Cleaning the database
path_data = config['PATH']['EQUIDES']
equides = pd.read_csv(path_data, sep = ";", encoding = "ISO-8859-1",
nrows = 50000, header=None, low_memory = False)
nom_de_colonnes = ['Race',
'Sexe',
'Robe',
'Date de naissance',
'Pays de naissance',
'Nom',
'Destiné à la consommation humaine',
'Date de mort']
equides.columns = nom_de_colonnes
# Drop the date of death, since it would give a hint about the horse's age,
# which would itself need to be anonymised.
variables_supprimees = ['Date de mort', 'Destiné à la consommation humaine']
equides = equides.drop(variables_supprimees,1)
# The "date de naissance" (date of birth) variable has to be recoded. We choose to keep only the year.
equides['Date de naissance'] = last_letters(equides['Date de naissance'],6)
# Replace empty or missing values with the modality "non renseigné" (not reported)
equides = equides.fillna('non renseigné')
equides = equides.applymap(lambda x: x.strip())
equides.replace('', 'non renseigné', inplace=True)
# Convert all breed names to lower case so they can be harmonised more easily,
# and normalise so that a single modality remains for each of inconnu, anglo-arabe, welsh and aa compl.
equides['Race'] = equides['Race'].str.lower()
liste_races = equides['Race'].unique().tolist()
for word in ['inconnu', 'anglo-arabe', 'welsh', 'aa compl.']:
for race in liste_races :
if word in race:
print(word, race)
equides['Race'] = equides['Race'].replace(race, word)
equides.replace('inconnu', 'non renseigné', inplace=True)
liste_races = equides['Race'].unique().tolist()
len(liste_races)
# ## II. Anonymisation
# Define the variables to anonymise
ordre_aggregation = ['Race',
                     'Sexe',
                     'Robe',
                     'Pays de naissance',
                     'Date de naissance']
Equides = AnonymDataFrame(equides, ordre_aggregation, unknown='non renseigné')
def aggregation_serie(x):
return(local_aggregation(x, 5, 'regroup_with_smallest', 'non renseigné'))
method_anonymisation = [(name, aggregation_serie) for name in ordre_aggregation[:-1]]
def aggregation_year(x):
return(local_aggregation(x, 5, 'with_closest', 'non renseigné'))
method_anonymisation += [('Date de naissance', aggregation_year)]
Equides.local_transform(method_anonymisation, 5)
Equides.df = Equides.anonymized_df
Equides.get_k()
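# How to read the result (an illustrative note, not part of the original script):
# get_k() returns the size of the smallest group of rows sharing identical values
# on the quasi-identifiers. Since every column above was locally aggregated with a
# threshold of 5, the returned k should be at least 5 if the aggregation succeeded.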
|
gpl-3.0
|
mo-g/iris
|
docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py
|
17
|
1646
|
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris
import iris.analysis
import iris.plot as iplt
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)
# Mask values so low that they are anomalous.
regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
norm = matplotlib.colors.LogNorm(5e-6, 0.0175)
global_air_temp.coord('longitude').guess_bounds()
global_air_temp.coord('latitude').guess_bounds()
fig = plt.figure(figsize=(8, 4.5))
plt.subplot(2, 2, 1)
iplt.pcolormesh(regional_ash, norm=norm)
plt.title('Volcanic ash total\nconcentration not regridded',
size='medium')
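# The loop below regrids the regional cube onto the global grid with three
# missing-data tolerances: mdtol is the fraction of masked source data a target
# cell may overlap before it is itself masked, so mdtol=0 masks a target cell as
# soon as any contributing cell is masked, while mdtol=1 masks it only when all
# contributing cells are masked.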
for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
plt.subplot(2, 2, subplot_num)
scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
global_ash = regional_ash.regrid(global_air_temp, scheme)
iplt.pcolormesh(global_ash, norm=norm)
plt.title('Volcanic ash total concentration\n'
'regridded with AreaWeighted(mdtol={})'.format(mdtol),
size='medium')
plt.subplots_adjust(hspace=0, wspace=0.05,
left=0.001, right=0.999, bottom=0, top=0.955)
# Iterate over each of the figure's axes, adding coastlines, gridlines
# and setting the extent.
for ax in fig.axes:
ax.coastlines('50m')
ax.gridlines()
ax.set_extent([-80, 40, 31, 75])
plt.show()
|
gpl-3.0
|
sgenoud/scikit-learn
|
examples/exercises/plot_iris_exercise.py
|
3
|
1547
|
"""
================================
SVM Exercise
================================
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
pl.figure(fig_num)
pl.clf()
pl.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=pl.cm.Paired)
# Circle out the test data
pl.scatter(X_test[:, 0], X_test[:, 1],
s=80, facecolors='none', zorder=10)
pl.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
pl.pcolormesh(XX, YY, Z > 0, cmap=pl.cm.Paired)
pl.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
pl.title(kernel)
pl.show()
|
bsd-3-clause
|
JoseBlanca/seq_crumbs
|
setup.py
|
1
|
6619
|
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
import sys
from sys import version_info
import os
import fnmatch
import glob
import platform
import subprocess
import distutils.command.install_data
try:
from setuptools import setup
from setuptools.command import install
_SETUPTOOLS = True
except ImportError:
from distutils.core import setup
from distutils.command import install
_SETUPTOOLS = False
#print "using_setuptools", _SETUPTOOLS
# The next three lines are modified from Biopython
__version__ = "Undefined"
for line in open('crumbs/__init__.py'):
if (line.startswith('__version__')):
exec(line.strip())
break
def check_biopython():
'If a Biopython is not installed it recommends to do so'
if _SETUPTOOLS:
return # No need to check anything because it will get installed
msg = None
try:
import Bio
except ImportError:
        msg = 'For some functionalities Biopython >= 1.60 is required\n'
if not msg:
try:
from Bio.bgzf import BgzfWriter
except ImportError:
msg = 'You have an old version of Biopython installed, '
msg += 'please update to >= 1.60\n'
if not msg:
return
sys.stderr.write(msg)
sys.exit(-1)
check_biopython()
def opj(*args):
path = os.path.join(*args)
return os.path.normpath(path)
def find_data_file(srcdir, *wildcards, **kw):
# get a list of all files under the srcdir matching wildcards,
# returned in a format to be used for install_data
def walk_helper(arg, dirname, files):
if '.svn' in dirname or '.git' in dirname:
return
names = []
lst, wildcards = arg
for wildcard in wildcards:
wc_name = opj(dirname, wildcard)
for fpath in files:
filename = opj(dirname, fpath)
if (fnmatch.fnmatch(filename, wc_name) and
not os.path.isdir(filename)):
names.append(filename)
if names:
lst.append((dirname, names))
file_list = []
recursive = kw.get('recursive', True)
if recursive:
os.path.walk(srcdir, walk_helper, (file_list, wildcards))
else:
walk_helper((file_list, wildcards),
srcdir,
[os.path.basename(f) for f in glob.glob(opj(srcdir, '*'))])
return file_list
def get_platform_bin_dir():
'''It returns the platform specific bindir. It returns the relative path
from the source code root dir'''
system = platform.system().lower()
arch = platform.architecture()[0]
return os.path.join('crumbs', 'third_party', 'bin', system, arch)
platform_bin_dir = get_platform_bin_dir()
external_executables = find_data_file(platform_bin_dir, '*')
def get_scripts():
scripts = []
for file_ in os.listdir('bin'):
if not file_.endswith('.error'):
scripts.append(os.path.join('bin', file_))
return scripts
class SmartInstall(install.install):
def run(self):
result = install.install.run(self)
install_cmd = self.get_finalized_command('install')
self.install_dir = getattr(install_cmd, 'install_lib')
# install the manpages
# check we have rst2man
try:
import docutils
have_rst2man = True
except ImportError:
have_rst2man = False
man_dir = os.path.join(sys.prefix, 'share/man/man1')
if have_rst2man:
if not os.path.exists(man_dir):
os.makedirs(man_dir)
for fpath in os.listdir('doc'):
if not fpath.endswith('.rst'):
continue
rst_fpath = os.path.join('doc', fpath)
man_fpath = os.path.join(man_dir,
os.path.splitext(fpath)[0] + '.1')
#print 'generating manpage: ', man_fpath
subprocess.call(['rst2man.py', rst_fpath, man_fpath])
return result
class InstallData(distutils.command.install_data.install_data):
"""need to change self.install_dir to the actual library dir"""
def run(self):
'''It modifies the place in which the thrid_party_binaries will be
installed.'''
install_cmd = self.get_finalized_command('install')
self.install_dir = getattr(install_cmd, 'install_lib')
return distutils.command.install_data.install_data.run(self)
with open('README.rst') as file_:
long_description = file_.read()
setup_args = {'name': 'ngs_crumbs',
'version': __version__,
'description': 'Small utilities for NGS files manipulation',
'long_description': long_description,
'author': 'Jose Blanca & Peio Ziarsolo',
'author_email': '[email protected]',
'url': 'http://bioinf.comav.upv.es/ngs_crumbs/',
'packages': ['crumbs', 'crumbs.third_party', 'crumbs.utils',
'crumbs.seq', 'crumbs.bam', 'crumbs.vcf'],
'include_package_data': True,
'data_files': external_executables,
'scripts': get_scripts(),
'license': 'AGPL',
'cmdclass': {'install': SmartInstall,
'install_data': InstallData}
}
if _SETUPTOOLS:
setup_args['install_requires'] = ['biopython >= 1.60', 'configobj',
'toolz', 'pysam>=0.8', 'rpy2',
'matplotlib']
if version_info[0] < 3 or (version_info[0] == 3 and version_info[1] < 3):
# until python 3.3 the standard file module has no support for
# wrapping file object and required to open a new file
# bz2file is a backport of the python 3.3 std library module
setup_args['install_requires'].append('bz2file')
setup(**setup_args)
|
gpl-3.0
|
CovingtonResearchGroup/olm
|
olm/loggers/loggerScripts.py
|
1
|
8602
|
"""
Contains functions that are useful in general for manipulation of data logger data
"""
from pandas import DataFrame, Series, notnull, Timestamp
from matplotlib.dates import date2num
from numpy import arange
#accepts a list of logger DateFrame objects as first argument
def joinLoggers(loggerlist, how='inner', interpolate = False):
"""
Joins the data from a list of logger DataFrame objects together.
Parameters
----------
loggerlist : list
A list of logger DataFrame objects to be joined.
how : string, optional
How the two DataFrames are to be joined. Default is inner.
interpolate : boolean, optional
Determines whether empty rows are to be filled with data via interpolation. Uses Pandas Dataframe.interpolate(). Default = False
Returns
-------
joined : pandas.DataFrame
DataFrame of joined loggers.
"""
#merge data from multiple loggers
if type(loggerlist) == list:
joined = loggerlist[0].join(loggerlist[1:], how=how)
if interpolate:
for col in joined.columns:
filled_col = joined[col].interpolate()
joined[col] = filled_col
return joined
else:
print("Problem with input list: Need to input a list of DataFrame objects")
return None
def joinAndResampleLoggers(loggerlist, interval, suffixes=[], how='inner', interpolate=False, limit=None):
"""
Joins and resamples data from DataFrame objects provided in a list.
Parameters
----------
loggerlist : list
List of logger pandas.core.dataframe.DataFrame objects to be joined.
interval : string
Pandas offset string (http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases) on which the DataFrames should be resampled (e.g. 'H'=hour, 'T'=minute, 'D'=day).
suffixes : list
A list of strings (same length as loggerlist) that contains suffixes to be applied to each logger. This is useful if multiple loggers have the same column names.
how : string
Method for joining loggers (default = 'inner').
interpolate : boolean
Whether data should be interpolated to fill gaps in rows (default=False).
limit : int
Maximum number of consecutive NaNs to fill if data are interpolated.
Returns
-------
joined : pandas.core.dataframe.DataFrame
DataFrame Object that contains joined DataFrames.
"""
#If no suffixes were passed, create a list full of None values
# this keeps suffixes from being added in the code below
if suffixes==[]:
for i in arange(len(loggerlist)):
suffixes.append(None)
resampledList = []
if type(loggerlist)==list:
#print "Processing list type loggerlist..."
for i,logger in enumerate(loggerlist):
if suffixes[i]!=None:
logger.columns+='_'+suffixes[i]
resampledList.append(logger.resample(interval).mean())
elif type(loggerlist)==dict:
#print "Processing dict type loggerlist..."
for logger_key in list(loggerlist.keys()):
logger = loggerlist[logger_key]
if type(suffixes)==dict:
if suffixes[logger_key]!=None:
logger.columns+='_'+suffixes[logger_key]
resampledList.append(logger.resample(interval).mean())
else:
print("Problem with suffixes. If loggerlist is a dict, suffixes also must be a dict.")
return None
else:
print("Problem with logger list: Need to input a list or dict of DataFrame or Series objects")
return None
for i, logger in enumerate(resampledList):
if i==0:
joined=logger
# elif i==1:
# joined=joined.join(logger, how=how, lsuffix='_'+suffixes[0], rsuffix='_'+suffixes[1])
# elif i==3:
# return joined
else:
joined=joined.join(logger, how=how)#, rsuffix='_'+suffixes[i])
if interpolate:
for col in joined.columns:
# print joined
# print col
filled_col = joined[col].interpolate(limit=limit)
joined[col] = filled_col
return joined
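# A minimal usage sketch (illustrative only; the frame names and the 'Temp'
# column are made up and not part of this module):
#
#     df_a = DataFrame({'Temp': [...]}, index=datetime_index_a)
#     df_b = DataFrame({'Temp': [...]}, index=datetime_index_b)
#     hourly = joinAndResampleLoggers([df_a, df_b], 'H',
#                                     suffixes=['siteA', 'siteB'],
#                                     interpolate=True, limit=3)
#     # -> columns 'Temp_siteA' and 'Temp_siteB' on a common hourly index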
def linear_correction(rawSeries, correctionSeries):
"""
Uses a Pandas Series of spot measured values to linearly correct time series data from a logger.
Parameters
----------
rawSeries : pandas.core.series.Series
A Pandas Series that contains the time series data to be corrected.
correctionSeries : pandas.core.series.Series
A Pandas Series that contains spot measurement data that are to be used to correct rawSeries.
Returns
-------
rawSeries : pandas.core.series.Series
A corrected version of the rawSeries time series data.
"""
#loop through correction series and calculate multiplying factors
corrDict = {}
for date, measurement in correctionSeries.items():
candidates = rawSeries.index[notnull(rawSeries)]
index = candidates.searchsorted(date)
if index > 0:
asOfDate = candidates[index - 1]
this_k = measurement/rawSeries[asOfDate]
corrDict[asOfDate]= this_k
else:
asOfDate = rawSeries.index[0]
if not rawSeries.index[0] in corrDict:
corrDict[rawSeries.index[0]]=1
if not rawSeries.index[-1] in corrDict:
corrDict[rawSeries.index[-1]] = corrDict[asOfDate]
k_series = Series(corrDict)
joined_series = DataFrame({'raw':rawSeries,'k':k_series})
joined_series.k = joined_series.k.interpolate()
rawSeries = rawSeries*joined_series.k
return rawSeries
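# Worked example of the correction (hypothetical numbers, not from the module):
# if the logger reads 10.0 at the time a hand measurement of 12.0 was taken, the
# factor stored for that time stamp is 12.0 / 10.0 = 1.2. Factors are interpolated
# between spot measurements and the raw series is multiplied by the interpolated
# factor, so sensor drift between calibration visits is corrected linearly in time.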
def manualCompare(logger, manual, value_name='', ltag='_log', mtag='_man'):
"""
    Function to extract logger data with the same timestamps as manual measurements for comparison. Both data sets are resampled on an hourly interval to ensure alignment of indices.
Parameters
----------
logger : pandas.core.series.Series
A Pandas TimeSeries containing a single column and time stamps as indices.
manual : pandas.core.series.Series
A Pandas TimeSeries containing a single variable from the manual data set
value_name : string
A string to use as the base for the collumn labels in the output DataFrame.
ltag : string
A suffix to be added to the logger column name, or used as the logger column name if value_name is not set.
mtag : string
A suffix to be added to the manual measurement column name, or used as the manual measurement column name if value_name is not set.
Returns
-------
joined : pandas.core.dataframe.DataFrame
A DataFrame object containing values of manual measurements and corresponding values from the logger time series using the aligned index that is resampled to the hour.
"""
if not(value_name==''):
value_name += '_'
logger = resampleHourly(logger)
manual = resampleHourly(manual)
wantidx = manual.index
logger = logger[wantidx]
    joined = DataFrame({value_name+ltag:logger, value_name+mtag:manual})
return joined
def shiftLogger(logger, shift_to, align_at_start = True):
"""
    Function to shift the time stamps of a logger Series or DataFrame so that they start (or end) at a known correct time, for example to repair a logger whose clock was set incorrectly.
Parameters
----------
logger : pandas.core.series.Series or pandas.core.dataframe.Dataframe
A Pandas TimeSeries or DataFrame containing time stamps as indices.
shift_to : string
A string that contains the date and time that the logger series should be shifted to. By default this is the correct starting time (first time stamp) of the series.
align_at_start : boolean
If True, shift_to is assumed to represent the correct starting date for the series. If False, shift_to is assumed to represent the correct final date of the series. (default=True)
Returns
-------
logger : pandas.core.series.Series or pandas.core.dataframe.DataFrame
A Series or DataFrame object that contains the correct shifted time stamps.
"""
bad_times = logger.index
#align at starting time stamp
if align_at_start:
start_time = Timestamp(shift_to)
dt = start_time - bad_times[0]
#align at ending time stamp
else:
end_time = Timestamp(shift_to)
dt = end_time - bad_times[-1]
#shift index of original logger time series
logger.index = logger.index + dt
return logger
|
mit
|
nelson-liu/scikit-learn
|
sklearn/covariance/__init__.py
|
389
|
1157
|
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
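# A minimal usage sketch (illustrative comment only, not part of the package
# source; X stands for any (n_samples, n_features) array):
#
#     from sklearn.covariance import LedoitWolf
#     lw = LedoitWolf().fit(X)
#     lw.covariance_   # shrunk covariance estimate
#     lw.precision_    # estimated precision (inverse covariance)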
|
bsd-3-clause
|
sinhrks/scikit-learn
|
examples/applications/topics_extraction_with_nmf_lda.py
|
18
|
3891
|
"""
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation to a corpus of documents in order to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
|
bsd-3-clause
|
renjinghai/models
|
autoencoder/AutoencoderRunner.py
|
9
|
1684
|
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.Autoencoder import Autoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = Autoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1),
              "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
|
apache-2.0
|
huzq/scikit-learn
|
sklearn/metrics/_plot/confusion_matrix.py
|
3
|
9459
|
from itertools import product
import numpy as np
from .. import confusion_matrix
from ...utils import check_matplotlib_support
from ...utils.validation import _deprecate_positional_args
from ...base import is_classifier
class ConfusionMatrixDisplay:
"""Confusion Matrix visualization.
    It is recommended to use :func:`~sklearn.metrics.plot_confusion_matrix` to
create a :class:`ConfusionMatrixDisplay`. All parameters are stored as
attributes.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
confusion_matrix : ndarray of shape (n_classes, n_classes)
Confusion matrix.
display_labels : ndarray of shape (n_classes,), default=None
Display labels for plot. If None, display labels are set from 0 to
`n_classes - 1`.
Attributes
----------
im_ : matplotlib AxesImage
Image representing the confusion matrix.
text_ : ndarray of shape (n_classes, n_classes), dtype=matplotlib Text, \
or None
        Array of matplotlib Text objects annotating the cells. `None` if `include_values` is false.
ax_ : matplotlib Axes
Axes with confusion matrix.
figure_ : matplotlib Figure
Figure containing the confusion matrix.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> predictions = clf.predict(X_test)
>>> cm = confusion_matrix(y_test, predictions, labels=clf.classes_)
>>> disp = ConfusionMatrixDisplay(confusion_matrix=cm,
... display_labels=clf.classes_)
>>> disp.plot() # doctest: +SKIP
"""
def __init__(self, confusion_matrix, *, display_labels=None):
self.confusion_matrix = confusion_matrix
self.display_labels = display_labels
@_deprecate_positional_args
def plot(self, *, include_values=True, cmap='viridis',
xticks_rotation='horizontal', values_format=None,
ax=None, colorbar=True):
"""Plot visualization.
Parameters
----------
include_values : bool, default=True
Includes values in confusion matrix.
cmap : str or matplotlib Colormap, default='viridis'
Colormap recognized by matplotlib.
xticks_rotation : {'vertical', 'horizontal'} or float, \
default='horizontal'
Rotation of xtick labels.
values_format : str, default=None
Format specification for values in confusion matrix. If `None`,
the format specification is 'd' or '.2g' whichever is shorter.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
colorbar : bool, default=True
Whether or not to add a colorbar to the plot.
Returns
-------
display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`
"""
check_matplotlib_support("ConfusionMatrixDisplay.plot")
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
cm = self.confusion_matrix
n_classes = cm.shape[0]
self.im_ = ax.imshow(cm, interpolation='nearest', cmap=cmap)
self.text_ = None
cmap_min, cmap_max = self.im_.cmap(0), self.im_.cmap(256)
if include_values:
self.text_ = np.empty_like(cm, dtype=object)
# print text with appropriate color depending on background
thresh = (cm.max() + cm.min()) / 2.0
for i, j in product(range(n_classes), range(n_classes)):
color = cmap_max if cm[i, j] < thresh else cmap_min
if values_format is None:
text_cm = format(cm[i, j], '.2g')
if cm.dtype.kind != 'f':
text_d = format(cm[i, j], 'd')
if len(text_d) < len(text_cm):
text_cm = text_d
else:
text_cm = format(cm[i, j], values_format)
self.text_[i, j] = ax.text(
j, i, text_cm,
ha="center", va="center",
color=color)
if self.display_labels is None:
display_labels = np.arange(n_classes)
else:
display_labels = self.display_labels
if colorbar:
fig.colorbar(self.im_, ax=ax)
ax.set(xticks=np.arange(n_classes),
yticks=np.arange(n_classes),
xticklabels=display_labels,
yticklabels=display_labels,
ylabel="True label",
xlabel="Predicted label")
ax.set_ylim((n_classes - 0.5, -0.5))
plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)
self.figure_ = fig
self.ax_ = ax
return self
@_deprecate_positional_args
def plot_confusion_matrix(estimator, X, y_true, *, labels=None,
sample_weight=None, normalize=None,
display_labels=None, include_values=True,
xticks_rotation='horizontal',
values_format=None,
cmap='viridis', ax=None, colorbar=True):
"""Plot Confusion Matrix.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
    y_true : array-like of shape (n_samples,)
Target values.
labels : array-like of shape (n_classes,), default=None
List of labels to index the matrix. This may be used to reorder or
select a subset of labels. If `None` is given, those that appear at
least once in `y_true` or `y_pred` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
display_labels : array-like of shape (n_classes,), default=None
Target names used for plotting. By default, `labels` will be used if
it is defined, otherwise the unique labels of `y_true` and `y_pred`
will be used.
include_values : bool, default=True
Includes values in confusion matrix.
xticks_rotation : {'vertical', 'horizontal'} or float, \
default='horizontal'
Rotation of xtick labels.
values_format : str, default=None
Format specification for values in confusion matrix. If `None`,
the format specification is 'd' or '.2g' whichever is shorter.
cmap : str or matplotlib Colormap, default='viridis'
Colormap recognized by matplotlib.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
colorbar : bool, default=True
Whether or not to add a colorbar to the plot.
.. versionadded:: 0.24
Returns
-------
display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`
See Also
--------
confusion_matrix :
Compute confusion matrix to evaluate the accuracy of a classification
Examples
--------
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import plot_confusion_matrix
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> plot_confusion_matrix(clf, X_test, y_test) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
check_matplotlib_support("plot_confusion_matrix")
if not is_classifier(estimator):
raise ValueError("plot_confusion_matrix only supports classifiers")
y_pred = estimator.predict(X)
cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight,
labels=labels, normalize=normalize)
if display_labels is None:
if labels is None:
display_labels = estimator.classes_
else:
display_labels = labels
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
display_labels=display_labels)
return disp.plot(include_values=include_values,
cmap=cmap, ax=ax, xticks_rotation=xticks_rotation,
values_format=values_format, colorbar=colorbar)
|
bsd-3-clause
|
LCAS/zoidbot
|
vrep_teleop/scripts/teleop_data_logging.py
|
1
|
7445
|
#!/usr/bin/env python
# run the baxterTeleopRecording.ttt file on Vrep before running this
import rospy
from baxter_core_msgs.msg import DigitalIOState
from sensor_msgs.msg import JointState
from vrep_teleop.msg import Joints
import numpy as np
import matplotlib.pyplot as plt
class RecordData:
def __init__(self):
self.left_button_pressed = 0
self.left_cuff_pressed = 0
self.right_cuff_pressed = 0
self.record = 0
self.start = 0
self.end = 0
self.n = 14
self.z = 0
self.currentPos = []
self.targetPos = []
self.newTargetPos = []
self.errorValue = []
self.targetVel = []
self.currentVel = []
self.newTargetVel = []
self.effort = []
self.ts = []
self.f1 = open("/home/user/turnDemoMaster_1.txt", "w+")
self.f2 = open("/home/user/turnDemoSlave_1.txt", "w+")
rospy.Subscriber('robot/digital_io/left_lower_button/state', DigitalIOState, self.left_button)
rospy.Subscriber('robot/digital_io/left_lower_cuff/state', DigitalIOState, self.left_cuff)
rospy.Subscriber('robot/digital_io/right_lower_cuff/state', DigitalIOState, self.right_cuff)
rospy.Subscriber("/robot/joint_states", JointState, self.master_state)
rospy.Subscriber("/vrep/joints", Joints, self.slave_state)
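    # Recording state machine implemented by the callbacks below: pressing the
    # left lower button arms the recorder (start=1); squeezing either lower cuff
    # while armed starts logging (record=1, start=0); pressing the button again
    # while logging stops it (record=0, end=1), after which the files are closed.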
def left_button(self, data):
if data.state == 1:
self.left_button_pressed = 1
self.start = 1
else:
self.left_button_pressed = 0
if self.start == 1 and self.record == 1:
self.record = 0
self.end = 1
def left_cuff(self, data):
if data.state == 1:
self.left_cuff_pressed = 1
else:
self.left_cuff_pressed = 0
def right_cuff(self, data):
if data.state == 1:
self.right_cuff_pressed = 1
else:
self.right_cuff_pressed = 0
if (self.left_cuff_pressed == 1 or self.right_cuff_pressed == 1) and self.start == 1:
self.record = 1
self.start = 0
def master_state(self, data):
if self.record == 1:
self.f1.write("%s \n" % data)
elif self.end == 1:
self.f1.close()
def slave_state(self, data):
if self.record == 1:
self.currentPos.extend(data.currentPos)
self.targetPos.extend(data.targetPos)
self.newTargetPos.extend(data.newTargetPos)
self.errorValue.extend(data.errorValue)
self.targetVel.extend(data.targetVel)
self.currentVel.extend(data.currentVel)
self.newTargetVel.extend(data.newTargetVel)
self.effort.extend(data.effort)
self.ts.extend(data.simTime)
self.z = self.z + 1
            rospy.loginfo("slave %s", data.seq)
self.f2.write("seq ")
self.f2.write("%d " % data.seq)
self.f2.write("\n")
self.f2.write("timeStamp ")
self.f2.write("%f " % data.timeStamp)
self.f2.write("\n")
self.f2.write("currentPos ")
for i in range(0, self.n):
self.f2.write("%f " % (data.currentPos[i]))
self.f2.write("\n")
self.f2.write("targetPos ")
for i in range(0, self.n):
self.f2.write("%f " % (data.targetPos[i]))
self.f2.write("\n")
self.f2.write("newTargetPos ")
for i in range(0, self.n):
self.f2.write("%f " % (data.newTargetPos[i]))
self.f2.write("\n")
self.f2.write("errorValue ")
for i in range(0, self.n):
self.f2.write("%f " % (data.errorValue[i]))
self.f2.write("\n")
self.f2.write("targetVel ")
for i in range(0, self.n):
self.f2.write("%f " % (data.targetVel[i]))
self.f2.write("\n")
self.f2.write("currentVel ")
for i in range(0, self.n):
self.f2.write("%f " % (data.currentVel[i]))
self.f2.write("\n")
self.f2.write("newTargetVel ")
for i in range(0, self.n):
self.f2.write("%f " % (data.newTargetVel[i]))
self.f2.write("\n")
self.f2.write("effort ")
for i in range(0, self.n):
self.f2.write("%f " % (data.effort[i]))
self.f2.write("\n")
self.f2.write("simTime ")
for i in range(0, self.n):
self.f2.write("%f " % (data.simTime[i]))
self.f2.write("\n")
self.f2.write("boxPosition ")
for i in range(0, 3):
self.f2.write("%f " % (data.boxPosition[i]))
self.f2.write("\n")
self.f2.write("boxOrientation ")
for i in range(0, 3):
self.f2.write("%f " % (data.boxOrientation[i]))
self.f2.write("\n")
elif self.end == 1:
self.f2.close()
if __name__ == '__main__':
rospy.init_node('trajectory_listener', anonymous=True)
try:
startRecord = RecordData()
while startRecord.end == 0:
pass
totalRead = startRecord.z
currentPos = np.array(startRecord.currentPos).reshape((totalRead, startRecord.n))
targetPos = np.array(startRecord.targetPos).reshape((totalRead, startRecord.n))
newTargetPos = np.array(startRecord.newTargetPos).reshape((totalRead, startRecord.n))
errorValue = np.array(startRecord.errorValue).reshape((totalRead, startRecord.n))
targetVel = np.array(startRecord.targetVel).reshape((totalRead, startRecord.n))
currentVel = np.array(startRecord.currentVel).reshape((totalRead, startRecord.n))
newTargetVel = np.array(startRecord.newTargetVel).reshape((totalRead, startRecord.n))
effort = np.array(startRecord.effort).reshape((totalRead, startRecord.n))
ts = np.array(startRecord.ts).reshape((totalRead, startRecord.n))
for i in range(0, startRecord.n):
plt.figure(i+1)
plt.figure(i+1).suptitle('Joint'+str(i+1))
plt.subplot(311)
plt.plot(ts[:, i]-ts[0, 0], targetPos[:, i], '.', label='Master')
plt.subplot(311)
plt.plot(ts[:, i]-ts[0, 0], newTargetPos[:, i], '. r', label='Master_Corrected')
plt.subplot(311)
plt.plot(ts[:, i]-ts[0, 0], currentPos[:, i], '. g', label='Slave')
plt.legend()
plt.xlabel('Time(in sec)')
plt.ylabel('Joint Angles(in Radians)')
plt.subplot(312)
plt.plot(ts[:, i]-ts[0, 0], errorValue[:, i], '.')
plt.xlabel('Time(in sec)')
plt.ylabel('Position Error(in Radians)')
plt.subplot(313)
plt.plot(ts[:, i]-ts[0, 0], targetVel[:, i], '.', label='Master_Velocity')
plt.subplot(313)
plt.plot(ts[:, i]-ts[0, 0], newTargetVel[:, i], '. r', label='Master_Velocity_Corrected')
plt.subplot(313)
plt.plot(ts[:, i]-ts[0, 0], currentVel[:, i], '. g', label='Slave_Velocity')
plt.legend()
plt.xlabel('Time(in sec)')
plt.ylabel('Joint Velocities(in Radians/sec)')
# plt.figure(i+1).savefig("jn"+str(i+1)+".png")
plt.show()
except rospy.ROSInterruptException:
pass
|
mit
|
richardwolny/sms-tools
|
lectures/03-Fourier-properties/plots-code/symmetry.py
|
26
|
1178
|
import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append('../../../software/models/')
import utilFunctions as UF
import dftModel as DF
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
w = np.hamming(511)
N = 512
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
fftbuffer = np.zeros(N)
x1 = x[pin-hM1:pin+hM2]
xw = x1*w
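# Zero-phase windowing: the next two assignments place the second half of the
# windowed frame at the start of the FFT buffer and the first half at its end,
# so the frame is effectively centred on sample 0. This avoids the large linear
# phase trend a time shift would add; fftshift below only reorders the bins so
# the zero-frequency bin ends up in the middle for plotting.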
fftbuffer[:hM1] = xw[hM2:]
fftbuffer[N-hM2:] = xw[:hM2]
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X))
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.ylabel('amplitude')
plt.title('x (soprano-E4.wav)')
plt.subplot(3,1,2)
plt.plot(np.arange(-N/2,N/2), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,-48,max(mX)])
plt.title ('mX = 20*log10(abs(X))')
plt.ylabel('amplitude (dB)')
plt.subplot(3,1,3)
plt.plot(np.arange(-N/2,N/2), pX, 'c', lw=1.5)
plt.axis([-N/2,N/2,min(pX),max(pX)])
plt.title ('pX = unwrap(angle(X))')
plt.ylabel('phase (radians)')
plt.tight_layout()
plt.savefig('symmetry.png')
plt.show()
|
agpl-3.0
|
heli522/scikit-learn
|
doc/conf.py
|
210
|
8446
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
rubikloud/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
93
|
25649
|
# Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.preprocessing import LabelEncoder
from sklearn.datasets import make_blobs
from sklearn.base import clone
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
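# Note: log_dloss above is the derivative of this per-sample loss with
# respect to the prediction p, d/dp log(1 + exp(-y * p)) = -y / (exp(y * p) + 1);
# the two branches in log_dloss only guard against overflow for large |y * p|.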
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
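# A minimal usage sketch of this dense reference implementation (hypothetical
# data only, not used by the tests below):
#
#   rng_demo = np.random.RandomState(0)
#   X_demo = rng_demo.normal(size=(20, 3))
#   y_demo = np.sign(X_demo.dot(np.ones(3)))
#   step = get_step_size(X_demo, alpha=0.1, fit_intercept=True)  # defined further below
#   w_demo, b_demo = sag(X_demo, y_demo, step, 0.1, n_iter=10, dloss=log_dloss)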
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
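# These are the same expressions that test_get_auto_step_size below checks
# against get_auto_step_size: 1 / (L + alpha + fit_intercept) for the squared
# loss and 4 / (L + 4 * alpha + fit_intercept) for the log loss, where
# L = max_i ||x_i||^2 is the largest squared sample norm.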
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = get_max_squared_sum(X)
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
def test_get_max_squared_sum():
n_samples = 100
n_features = 10
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_features).astype(np.float64)
mask = rng.randn(n_samples, n_features)
X[mask > 0] = 0.
X_csr = sp.csr_matrix(X)
X[0, 3] = 0.
X_csr[0, 3] = 0.
sum_X = get_max_squared_sum(X)
sum_X_csr = get_max_squared_sum(X_csr)
assert_almost_equal(sum_X, sum_X_csr)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
|
bsd-3-clause
|
swift-nav/peregrine
|
peregrine/analysis/print_track_res.py
|
2
|
2431
|
#!/usr/bin/env python
# Copyright (C) 2016 Swift Navigation Inc.
# Contact: Adel Mamin <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import numpy as np
import matplotlib.pyplot as plt
import argparse
from peregrine import defaults
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", default="tracking_res.csv",
help="the input CSV file to process")
parser.add_argument("-p", "--par-to-print", default="CN0",
help="parameter to print")
parser.add_argument("--profile",
choices=['peregrine', 'custom_rate', 'low_rate',
'normal_rate', 'piksi_v3', 'high_rate'],
metavar='PROFILE',
help="L1C/A & L2C IF + sampling frequency profile"
"('peregrine'/'custom_rate', 'low_rate', "
"'normal_rate', 'piksi_v3', 'high_rate')",
default='peregrine')
args = parser.parse_args()
if args.profile == 'peregrine' or args.profile == 'custom_rate':
freq_profile = defaults.freq_profile_peregrine
elif args.profile == 'low_rate':
freq_profile = defaults.freq_profile_low_rate
elif args.profile == 'normal_rate':
freq_profile = defaults.freq_profile_normal_rate
elif args.profile == 'high_rate':
freq_profile = defaults.freq_profile_high_rate
else:
raise NotImplementedError()
fig = plt.figure()
plt.title(args.par_to_print.replace('_', ' ').title() + ' vs Time')
ax1 = fig.add_subplot(111)
plt.ylabel(args.par_to_print.replace('_', ' ').title(), color='b')
plt.xlabel('Time [s]')
data = np.genfromtxt(args.file, dtype=float, delimiter=',', names=True)
time_stamps = np.array(data['sample_index'])
time_stamps = time_stamps - data['sample_index'][0]
time_stamps = time_stamps / freq_profile['sampling_freq']
plt.plot(time_stamps, np.array(data[args.par_to_print]), 'r.')
plt.legend(loc='upper right')
plt.grid()
plt.axis('tight')
plt.show()
if __name__ == '__main__':
main()
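# Example invocation (file name is hypothetical; the CSV header must contain a
# 'sample_index' column and the column named by -p/--par-to-print):
#   python print_track_res.py -f tracking_res.csv -p CN0 --profile low_rate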
|
gpl-3.0
|
michigraber/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
45
|
15091
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = assert_warns(DeprecationWarning, make_multilabel_classification,
n_samples=100, n_features=20, n_classes=3,
random_state=0, allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator=True,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
return_indicator=True, allow_unlabeled=allow_unlabeled,
return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
|
bsd-3-clause
|
Aasmi/scikit-learn
|
sklearn/ensemble/tests/test_partial_dependence.py
|
365
|
6996
|
"""
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
|
bsd-3-clause
|
mvpossum/machine-learning
|
tp4/plot_table_dist.py
|
1
|
1691
|
#! /usr/bin/env python
import sys
import os
from sys import argv
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
FILE = argv[1]
PLOT_FILE = os.path.splitext(FILE)[0]+'.png'
ERROR = 'er' in FILE.lower()
legend = argv[2:]
cols = len(legend)
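# Expected input (hypothetical example): a whitespace-separated table whose
# first column is the x value and whose remaining columns correspond to the
# legend labels given on the command line, e.g.
#   ./plot_table_dist.py tabla_er.txt "Train" "Test"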
if cols >= 4:
linestyles = ['-', '-', '--', '--', '--', '-', '--', '-', '--', '-', '--', '-']
colors = ['r', 'b', 'r', 'b', 'g', 'g', 'orange', 'orange', 'purple', 'purple', 'y', 'y', 'gray', 'gray']
elif cols == 3:
linestyles = ['-', '-', '-']
colors = ['b', 'g', 'r']
else:
linestyles = ['-','-']
colors = ['r', 'b']
x = []
y = [[] for _ in range(cols)]
for line in open(FILE):
if line.strip():
line = [float(s) for s in line.split(' ') if s.strip()]
x.append(line[0])
for j in range(cols):
y[j].append(line[j+1])
fig, ax = plt.subplots()
ax = plt.subplot(111)
FONT_SIZE = 18
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(FONT_SIZE)
for yv in range(cols):
ax.plot(x, y[yv], label=legend[yv], linestyle=linestyles[yv], color=colors[yv])
#~ if ERROR:
#ax.set_ylim(9,60)
#~ else:
#~ ax.set_ylim(0,30)
#ax.set_xlim(0,128)
#~ box = ax.get_position()
#~ ax.set_position([box.x0, box.y0, box.width * 0.62, box.height])
#~ ax.legend(prop={'size':FONT_SIZE}, bbox_to_anchor=(1, 1.0))
ax.legend(prop={'size':FONT_SIZE})
plt.xlabel('Dimensiones', size=FONT_SIZE)
#~ plt.xscale('log')
ylabel = 'Error (%)' if ERROR else 'D'
plt.ylabel(ylabel, size=FONT_SIZE)
plt.savefig(PLOT_FILE)
plt.show()
#~ (\d+)_\d+\.report (\d+\.\d+) (\d+\.\d+)
#~ ,\1,\2
|
mit
|