repo_name (stringlengths 7-79) | path (stringlengths 4-179) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 959-798k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
automl/SpySMAC | cave/utils/bokeh_routines.py | 1 | 7662 |
import logging
from bokeh.models import (CustomJS, ColumnDataSource)
from bokeh.models.widgets import (RadioButtonGroup, CheckboxButtonGroup, Button, DataTable, TableColumn)
def get_checkbox(glyph_renderers, labels, max_checkbox_length=None):
"""
Parameters
----------
glyph_renderers: List[List[Renderer]]
list of glyph-renderers
labels: List[str]
list with strings to be put in checkbox
max_checkbox_length: None or int
maximum number of checkboxes in a single CheckboxButtonGroup, splits up the checkboxes over multiple
CheckboxButtonGroups (e.g. to simulate "line-breaks" in the visual layout)
NOTE: if this is not None, will return List[CheckboxButtonGroup]
Returns
-------
checkbox: CheckboxButtonGroup or List[CheckboxButtonGroup]
checkbox object (or list of checkbox-objects)
select_all: Button
button related to checkbox
select_none: Button
button related to checkbox
"""
code, args_checkbox = _prepare_nested_glyphs(glyph_renderers)
# Toggle all renderers in a subgroup, depending on whether their label is active in the checkbox
code += """
for (i = 0; i < len_labels; i++) {
if (cb_obj.active.includes(i)) {
// console.log('Setting to true: ' + i + '(' + glyph_renderers[i].length + ')')
for (j = 0; j < glyph_renderers[i].length; j++) {
glyph_renderers[i][j].visible = true;
// console.log('Setting to true: ' + i + ' : ' + j)
}
} else {
// console.log('Setting to false: ' + i + '(' + glyph_renderers[i].length + ')')
for (j = 0; j < glyph_renderers[i].length; j++) {
glyph_renderers[i][j].visible = false;
// console.log('Setting to false: ' + i + ' : ' + j)
}
}
}
"""
# Create the actual checkbox-widget
callback = CustomJS(args=args_checkbox, code=code)
checkbox = CheckboxButtonGroup(labels=labels, active=list(range(len(labels))), callback=callback)
# Select all/none:
handle_list_as_string = str(list(range(len(glyph_renderers))))
code_button_tail = "checkbox.active = labels;" + code.replace('cb_obj', 'checkbox')
select_all = Button(label="All", callback=CustomJS(args=dict({'checkbox': checkbox}, **args_checkbox),
code="var labels = {}; {}".format(
handle_list_as_string, code_button_tail)))
select_none = Button(label="None", callback=CustomJS(args=dict({'checkbox': checkbox}, **args_checkbox),
code="var labels = {}; {}".format('[]', code_button_tail)))
if max_checkbox_length:
# Keep all and none buttons, but create new checkboxes and return a list
slices = list(range(0, len(glyph_renderers), max_checkbox_length)) + [len(glyph_renderers)]
checkboxes = [get_checkbox(glyph_renderers[s:e], labels[s:e])[0] for s, e in zip(slices[:-1], slices[1:])]
return checkboxes, select_all, select_none
return checkbox, select_all, select_none
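# --- Editor's illustrative sketch (not part of the original module) -----------
# One way the three widgets returned by get_checkbox() could be combined into a
# Bokeh layout. `renderer_groups` and `names` are hypothetical placeholders for
# a nested list of glyph-renderers and their labels.
def _example_checkbox_layout(renderer_groups, names):
    from bokeh.layouts import column, row
    checkbox, select_all, select_none = get_checkbox(renderer_groups, names)
    return column(row(select_all, select_none), checkbox)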
def get_radiobuttongroup(glyph_renderers, labels):
"""
Parameters
----------
glyph_renderers: List[List[Renderer]]
list of glyph-renderers
labels: List[str]
list with strings to be put in widget
Returns
-------
radiobuttongroup: RadioButtonGroup
radiobuttongroup widget to select one of the elements
"""
code, args = _prepare_nested_glyphs(glyph_renderers)
# Toggle all renderers in a subgroup, depending on whether their label is the active radio button
code += """
for (i = 0; i < len_labels; i++) {
if (cb_obj.active === i) {
console.log('Setting to true: ' + i + '(' + glyph_renderers[i].length + ')')
for (j = 0; j < glyph_renderers[i].length; j++) {
glyph_renderers[i][j].visible = true;
console.log('Setting to true: ' + i + ' : ' + j)
}
} else {
console.log('Setting to false: ' + i + '(' + glyph_renderers[i].length + ')')
for (j = 0; j < glyph_renderers[i].length; j++) {
glyph_renderers[i][j].visible = false;
console.log('Setting to false: ' + i + ' : ' + j)
}
}
}
"""
# Create the actual radiobutton-widget
callback = CustomJS(args=args, code=code)
radio = RadioButtonGroup(labels=labels, active=0, callback=callback)
return radio
def _prepare_nested_glyphs(glyph_renderers):
# First create a flat list of alias strings named glyph_renderer_i, one for each renderer
num_total_lines = sum([len(group) for group in glyph_renderers])
aliases_flattened = ['glyph_renderer' + str(i) for i in range(num_total_lines)]
# Make that list nested to sum up multiple renderers in one checkbox
aliases = []
start = 0
for group in glyph_renderers:
aliases.append(aliases_flattened[start: start+len(group)])
start += len(group)
# Flatten renderers-list to pass it to the CustomJS properly
glyph_renderers_flattened = [a for b in glyph_renderers for a in b]
args = {name: glyph for name, glyph in zip(aliases_flattened, glyph_renderers_flattened)}
# Create javascript-code
code = "len_labels = " + str(len(aliases)) + ";"
# Create nested list of glyph renderers to be toggled by a button
code += "glyph_renderers = [{}];".format(
','.join(['[' + ','.join([str(idx) for idx in group]) + ']' for group in aliases]))
return code, args
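# Editor's note (traced from the function above): for two groups with two and one
# renderer(s) respectively, _prepare_nested_glyphs returns a JS prelude of the form
#   len_labels = 2;glyph_renderers = [[glyph_renderer0,glyph_renderer1],[glyph_renderer2]];
# plus an args dict mapping 'glyph_renderer0', ... to the Bokeh renderer objects,
# so the CustomJS callbacks above can toggle them per label.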
def array_to_bokeh_table(df, sortable=None, width=None, logger=None):
"""
Create a bokeh DataTable from a pandas DataFrame.
Parameters
----------
df: pandas.DataFrame
dataframe with columns and index set
sortable: dict(str : boolean)
columns that should be sortable, default none
width: dict(str : int)
width of columns, default 100 for all
logger: logging.Logger
logger to use, if not set use default
Returns
-------
bokeh_table: bokeh.models.widgets.DataTable
bokeh object
"""
if logger is None:
logger = logging.getLogger('cave.utils.bokeh_routines.array_to_bokeh_table')
if sortable is None:
sortable = {}
if width is None:
width = {}
columns = list(df.columns.values)
data = dict(df[columns])
# Sanity checks
for attr, d in {'width': width, 'sortable': sortable}.items():
diff = set(d.keys()).difference(set(columns))
if len(diff) > 0:
logger.debug("For attr %s with value %s and columns %s there is a diff %s", attr, d, columns, diff)
raise ValueError("Illegal table description! Trying to specify '%s' for the following columns, but they "
"are not present in DataFrame: %s!" % (attr, diff))
source = ColumnDataSource(data)
columns = [TableColumn(field=header, title=header,
sortable=sortable.get(header, False),
default_sort='descending',
width=width.get(header, 100)) for header in columns
]
data_table = DataTable(source=source,
columns=columns,
height=20 + 30 * len(list(data.values())[0]),
index_position=None, # Disable index-column
)
return data_table
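# --- Editor's illustrative sketch (not part of the original module) -----------
# Minimal usage of array_to_bokeh_table(); the column names and values are
# hypothetical.
def _example_bokeh_table():
    import pandas as pd
    df = pd.DataFrame({'budget': [100, 200], 'cost': [95, 210]})
    return array_to_bokeh_table(df, sortable={'cost': True}, width={'budget': 80})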
| bsd-3-clause |
dpaiton/OpenPV | pv-core/analysis/python/plot_amoeba_response.py | 1 | 4052 |
"""
Plot the response to an amoeba stimulus: read PetaVision on- and off-weight files and a
sparse activity file, then, for each time window, show the input image, the average
activity map and the weighted image reconstruction
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import matplotlib.image as mpimg
import PVReadWeights as rw
import PVReadSparse as rs
import math
"""
mi=mpimg.imread(sys.argv[3])
imgplot = plt.imshow(mi, interpolation='Nearest')
imgplot.set_cmap('hot')
plt.show()
"""
def nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post):
a = math.pow(2.0, (zScaleLog2Pre - zScaleLog2Post))
ia = a
if ia < 2:
k0 = 0
else:
k0 = ia/2 - 1
if a < 1.0 and kzPre < 0:
k = kzPre - (1.0/a) + 1
else:
k = kzPre
return k0 + (a * k)
def zPatchHead(kzPre, nzPatch, zScaleLog2Pre, zScaleLog2Post):
a = math.pow(2.0, (zScaleLog2Pre - zScaleLog2Post))
if a == 1:
shift = -(0.5 * nzPatch)
return shift + nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post)
shift = 1 - (0.5 * nzPatch)
if (nzPatch % 2) == 0 and a < 1:
kpos = (kzPre < 0)
if kzPre < 0:
kpos = -(1+kzPre)
else:
kpos = kzPre
l = (2*a*kpos) % 2
if kzPre < 0:
shift -= l == 1
else:
shift -= l == 0
elif (nzPatch % 2) == 1 and a < 1:
shift = -(0.5 * nzPatch)
neighbor = nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post)
if nzPatch == 1:
return neighbor
return shift + neighbor
"""
a = zPatchHead(int(sys.argv[1]), 5, -math.log(4, 2), -math.log(1, 2))
print a
print int(a)
sys.exit()
"""
vmax = 100.0 # Hz
space = 1
extended = False
w = rw.PVReadWeights(sys.argv[1])
wOff = rw.PVReadWeights(sys.argv[2])
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
predub = np.zeros(((nx*nx),(nxp * nxp)))
predubOff = np.zeros(((nx*nx),(nxp * nxp)))
numpat = w.numPatches
print "numpat = ", numpat
for k in range(numpat):
p = w.next_patch()
pOff = wOff.next_patch()
predub[k] = p
predubOff[k] = pOff
print "weights done"
#print "p = ", P
#if k == 500:
# sys.exit()
#end fig loop
activ = rs.PVReadSparse(sys.argv[3], extended)
end = int(sys.argv[4])
step = int(sys.argv[5])
begin = int(sys.argv[6])
count = 0
for end in range(begin+step, end, step):
A = activ.avg_activity(begin, end)
this = 7 + count
count += 1
print "this = ", this
print "file = ", sys.argv[this]
print
numrows, numcols = A.shape
min = np.min(A)
max = np.max(A)
s = np.zeros(numcols)
for col in range(numcols):
s[col] = np.sum(A[:,col])
s = s/numrows
b = np.reshape(A, (len(A)* len(A)))
c = np.shape(b)[0]
mi=mpimg.imread(sys.argv[this])
print "a w start"
rr = nx / 64
im = np.zeros((64, 64))
for yi in range(len(A)):
for xi in range(len(A)):
x = int(zPatchHead(int(xi), 5, -math.log(rr, 2), -math.log(1, 2)))
y = int(zPatchHead(int(yi), 5, -math.log(rr, 2), -math.log(1, 2)))
if 58 > x >= 0 and 58 > y >= 0:
if A[yi, xi] > 0:
patch = predub[yi * (nx) + xi]
patchOff = predubOff[yi * (nx) + xi]
patch = np.reshape(patch, (nxp, nxp))
patchOff = np.reshape(patchOff, (nxp, nxp))
for yy in range(nyp):
for xx in range(nxp):
im[y + yy, x + xx] += patch[yy, xx] * A[yi, xi]
im[y + yy, x + xx] -= patchOff[yy, xx] * A[yi, xi]
fig = plt.figure()
ax = fig.add_subplot(3,1,1)
ax.imshow(mi, interpolation='Nearest', cmap='gray')
ax = fig.add_subplot(3,1,2)
#ax.imshow(mi, interpolation='Nearest', cmap='gray', origin="lower")
ax.set_xlabel('activity')
ax.imshow(A, cmap=cm.jet, interpolation='nearest', vmin = 0.0, vmax = np.max(A))
ax = fig.add_subplot(313)
ax.set_xlabel('image reconstruction')
ax.imshow(im, cmap=cm.jet, interpolation='nearest', vmin = 0.0, vmax = np.max(im))
plt.show()
#end fig loop
| epl-1.0 |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/utils/tests/test_sparsefuncs.py | 78 | 17611 |
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_equal)
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
assert_equal(X_means.dtype, output_dtype)
assert_equal(X_vars.dtype, output_dtype)
assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
assert_equal(X_means.dtype, output_dtype)
assert_equal(X_vars.dtype, output_dtype)
assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
def test_incr_mean_variance_axis():
for axis in [0, 1]:
rng = np.random.RandomState(0)
n_features = 50
n_samples = 10
data_chunks = [rng.randint(0, 2, size=n_features)
for i in range(n_samples)]
# default params for incr_mean_variance
last_mean = np.zeros(n_features)
last_var = np.zeros_like(last_mean)
last_n = 0
# Test errors
X = np.array(data_chunks[0])
X = np.atleast_2d(X)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
last_mean, last_var, last_n)
# Test _incr_mean_and_var with a 1 row input
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr) # X.shape[axis] picks # samples
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
# Test _incremental_mean_and_var with whole data
X = np.vstack(data_chunks)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_sparse, axis, last_mean,
last_var, last_n)
assert_equal(X_means_incr.dtype, output_dtype)
assert_equal(X_vars_incr.dtype, output_dtype)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
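# --- Editor's illustrative sketch (not part of the original test module) ------
# The incremental pattern exercised above: feed CSR chunks to
# incr_mean_variance_axis one at a time, threading the running statistics
# through last_mean / last_var / last_n.
def _example_streaming_mean_variance(chunks, n_features):
    last_mean, last_var, last_n = np.zeros(n_features), np.zeros(n_features), 0
    for chunk in chunks:  # each chunk: array-like with n_features columns
        last_mean, last_var, last_n = incr_mean_variance_axis(
            sp.csr_matrix(chunk), axis=0, last_mean=last_mean,
            last_var=last_var, last_n=last_n)
    return last_mean, last_var, last_n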
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1,
last_mean=None, last_var=None, last_n=None)
def test_densify_rows():
for dtype in (np.float32, np.float64):
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=dtype)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=dtype)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
def test_inplace_normalize():
ones = np.ones((10, 1))
rs = RandomState(10)
for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2):
for dtype in (np.float64, np.float32):
X = rs.randn(10, 5).astype(dtype)
X_csr = sp.csr_matrix(X)
inplace_csr_row_normalize(X_csr)
assert_equal(X_csr.dtype, dtype)
if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
X_csr.data **= 2
assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
| unlicense |
AlexRobson/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 |
"""
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rodriggs/pipeline | preprocessing/preprocessing.py | 2 | 21493 |
import numpy as np
import pandas as pd
from collections import Counter
import scipy.stats as scs
from scipy.stats import skew, pearsonr, spearmanr, kendalltau
import cPickle as pickle
import string
from edatools import compare_missing_values, count_data, summary_stats, printall
class data_transporter(object):
"""
A 'data' class that contains all of the data for ease of use. It provides quick access to the different types of variables (continuous, discrete, ordinal, nominal), as determined by examining the data.
"""
def __init__(self, filename):
"""
Instantiate class and initialize dataframe, id, and labels if training data set.
"""
print 'Instantiate data class\n'
self.package = self.load_data(filename)
self.unpack(self.package)
# Load feature labels
self.load_feature_labels()
def load_data(self, filename):
"""
Load data and add variables to dataframes.
"""
print "Reading in data from csv\n"
df_train = pd.read_csv(filename[0])
df_test = pd.read_csv(filename[1])
# Replace individual data points
# The sample with ID 666 has GarageArea, GarageCars, and GarageType
# but none of the other garage fields, so fill those in with their most common values.
df_test.loc[666, 'GarageFinish'] = 'Unf'
df_test.loc[666, 'GarageCond'] = 'TA'
df_test.loc[666, 'GarageQual'] = 'TA'
df = pd.concat((df_train.loc[:,'MSSubClass':'SaleCondition'],
df_test.loc[:,'MSSubClass':'SaleCondition']))
df.reset_index(inplace=True, drop=True)
df_train_id = df_train['Id']
df_test_id = df_test['Id']
y = df_train['SalePrice'].get_values()
# Package that will be unpacked
package = [df, df_train_id, df_test_id, y]
return package
def unpack(self, package):
"""
INPUT: Package of raw data
OUTPUT: No output returned; simply defines instance variables
"""
print "Loading data on transporter\n"
self.df = package[0]
self.df_train_id = package[1]
self.df_test_id = package[2]
self.y = package[3]
self.original_features = self.df.columns.unique()
self.X = 0
self.X_pred = 0
self.df_train = 0
self.df_test = 0
self.df_pretransform = 0
def continuous(self):
"""
Stores all of the continuous variable labels
"""
c = ['LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'GarageArea', 'WoodDeckSF',
'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch',
'PoolArea', 'MiscVal']
self.continuous_feat = c
def discrete(self):
"""
Stores all of the discrete variable labels
"""
d = ['YearBuilt', 'YearRemodAdd','BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageYrBlt', 'GarageCars', 'MoSold', 'YrSold']
self.discrete_feat = d
def nominal(self):
"""
Stores all of the nominal variable labels
"""
n = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotConfig', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'Heating', 'CentralAir', 'Electrical', 'Functional', 'GarageType', 'PavedDrive', 'MiscFeature']
self.nominal_feat = n
def ordinal(self):
"""
Stores all of the ordinal variable labels
"""
o = ['LotShape', 'LandContour', 'Utilities', 'LandSlope', 'OverallQual', 'OverallCond', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageFinish', 'GarageQual', 'GarageCond', 'PoolQC', 'Fence', 'SaleType', 'SaleCondition']
self.ordinal_feat = o
def constructed(self):
"""
Initializes the constructed, mapped, removed and binary feature label lists
"""
self.constructed_feat = []
self.mapped_feat = []
self.removed_feat = []
self.binary_feat = []
def load_feature_labels(self):
"""
Load labels onto data class
"""
print 'Load class labels\n'
self.continuous()
self.discrete()
self.nominal()
self.ordinal()
self.constructed()
def missing_values(dt):
"""
Replace all of the NaNs with values determined by examining the individual rows with missing values, the surrounding rows, and the counts of each label within a feature
"""
# Find null values using all_data[pd.isnull(all_data['PoolQC'])]
print 'Replace missing values\n'
df = dt.df
# Replace garage year with year built if null
df.GarageYrBlt.fillna(df.YearBuilt, inplace=True)
# MSZoning is replaced with the most common value within each Neighborhood, since zoning is mostly consistent within a neighborhood
zoning = {}
for N in df.Neighborhood.unique().tolist():
zoning[N] = df[df['Neighborhood'] == N]['MSZoning'].mode()[0]
mask = df['MSZoning'].isnull()
df.loc[mask, 'MSZoning'] = df.loc[mask, 'Neighborhood'].map(zoning)
# Replace nulls with most common values
D = {
'Exterior1st': 'VinylSd', 'Exterior2nd': 'VinylSd',
'Utilities': 'AllPub', 'Electrical': 'SBrkr',
'Functional': 'Typ', 'SaleType': 'WD'
}
for k, v in D.iteritems():
df[k].fillna(value=v, inplace=True)
# Replace null with None
col_none = [
'Alley', 'MasVnrType',
'Fence', 'MiscFeature',
'GarageType', 'BsmtExposure',
'PoolQC', 'BsmtQual',
'BsmtCond', 'BsmtFinType1',
'BsmtFinType2', 'KitchenQual',
'FireplaceQu', 'GarageQual',
'GarageCond', 'GarageFinish'
]
for col in col_none:
df[col].fillna(value='None', inplace=True)
# Replace null with 0
col_0 = [
'BsmtFullBath', 'BsmtHalfBath', 'GarageYrBlt',
'GarageCars', 'GarageArea', 'BsmtFinSF1',
'BsmtFinSF2', 'BsmtUnfSF', 'MasVnrArea',
'PoolArea', 'MiscVal', 'LotFrontage',
'PoolArea', 'TotalBsmtSF'
]
for col in col_0:
df[col].fillna(value=0, inplace=True)
# Convert to floats
df['OverallQual'] = df['OverallQual'].astype(float)
df['OverallCond'] = df['OverallCond'].astype(float)
dt.df = df
return dt
# New variable construction
### Use self.function to place functions below inside the class
def load_new_features(dt):
"""
Add new columns to data frame
"""
print 'Load new features\n'
df = dt.df
# Additions of new features
# The total square feet of the house: float64
df['TotSqFt'] = (df['TotalBsmtSF'] + df['GrLivArea']).astype(float)
# The time since the house was sold, 2010 base year
df['TimeSinceSold'] = ((2010 - df['YrSold']) * 1).astype(float)
# The number of bathrooms in the house: float64
df['TotBath'] = (df['BsmtHalfBath'] + df['BsmtFullBath'] + df['FullBath'] + df['HalfBath']).astype(float)
# How old the house was at the time of sale 0, 1
df['SaleAge'] = (df['YrSold'] - df['YearBuilt']).astype(float)
df['SaleAge'].replace(to_replace=-1, value=0, inplace=True)
# How many years has it been since the remodel: 0,1
df['YrSinceRemodel'] = (df['YrSold'] - df['YearRemodAdd']).astype(float)
df['YrSinceRemodel'].replace({-2:0, -1:0}, inplace=True)
# Is the square footage greater than two standard deviations from the mean? sq ft: 0, 1
PremiumSQ = df.TotSqFt.mean() + 2 * df.TotSqFt.std()
df['Premium'] = (df['TotSqFt'] > PremiumSQ) * 1
# Is the garage detached: 0, 1
df['IsGarageDetached'] = (df['GarageType'] == 'Detchd') * 1
# Most have a paved drive so treat dirt/gravel and partial pavement as 'not paved': 0,1
df['IsPavedDrive'] = (df['PavedDrive'] == 'Y') * 1
# The only interesting 'misc. feature' is the presence of a shed: 0,1
df['HasShed'] = (df['MiscFeature'] == 'Shed') * 1
# If YearRemodAdd != YearBuilt, then a remodeling took place at some point : 0,1
df['Remodeled'] = (df['YearRemodAdd'] != df['YearBuilt']) * 1
# Did a remodeling happen in the year the house was sold?: 0,1
df['RecentRemodel'] = (df['YearRemodAdd'] == df['YrSold']) * 1
# Was house sold in the year it was built?: 0,1
df['VeryNewHouse'] = (df['YearBuilt'] == df['YrSold']) * 1
# Features a result of specific labels 0, 1
df['Has2ndFloor'] = (df['2ndFlrSF'] == 0) * 1
df['HasMasVnr'] = (df['MasVnrArea'] == 0) * 1
df['HasWoodDeck'] = (df['WoodDeckSF'] == 0) * 1
df['HasOpenPorch'] = (df['OpenPorchSF'] == 0) * 1
df['HasEnclosedPorch'] = (df['EnclosedPorch'] == 0) * 1
df['Has3SsnPorch'] = (df['3SsnPorch'] == 0) * 1
df['HasScreenPorch'] = (df['ScreenPorch'] == 0) * 1
# Is the house in a residential district: 1, 0
df['Residential'] = df['MSZoning'].isin(['C (all)', 'FV']) * 1
# Is the house level: 1, 0
df['Level'] = df['LandContour'].isin(['Bnk', 'Low', 'HLS']) * 1
# Does the house have Shingles: 0, 1
df['HasShingles'] = df['RoofMatl'].isin(['ClyTile', 'Membran', 'Metal', 'Roll', 'Tar&Grv', 'WdShake', 'WdShngl']) * 1
# Does the house have a gas furnace: 0, 1
df['GasFurance'] = df['Electrical'].isin(['FuseA', 'FuseF', 'FuseP', 'Mix']) * 1
# Circuit Breakers: 0, 1
df['CircuitBreaker'] = df['LandContour'].isin(['Bnk', 'Low', 'HLS']) * 1
# Typical home functionality? :0, 1
df['TypHomeFunc'] = df['Functional'].isin(['Maj1', 'Maj2', 'Min1', 'Min2', 'Mod', 'Sev']) * 1
# Is the drive paved?: 0, 1
df['Paved'] = df['PavedDrive'].isin(['N', 'P']) * 1
# There is no fence: 0, 1
df['NoFence'] = df['Fence'].isin(['GdPrv', 'GdWo', 'MnPrv', 'MnWw']) * 1
# Is it a conventional warranty deed? 0, 1
df['ConvWarrantyDeed'] = df['SaleType'].isin(['COD','CWD', 'Con', 'ConLD', 'ConLI', 'ConLw', 'New', 'Oth']) * 1
# Was the sale condition normal?: 0, 1
df['NormalSaleCondition'] = df['SaleCondition'].isin(['Abnorml', 'AdjLand', 'Alloca', 'Family', 'Partial']) * 1
# Regular lot shape": 0, 1
df['RegLotShape'] = df['LotShape'].isin(['IR3', 'IR2', 'IR1']) * 1
# Worst time to buy July/Aug/Nov/Dec/Jan/Feb: 0, 1
df['BestBuyTime'] = df['MoSold'].isin([3, 4, 5, 6, 9, 10]) * 1
# Append constructed feature names
dt.constructed_feat = ['TotSqFt', 'TimeSinceSold', 'TotBath', 'SaleAge',
'YrSinceRemodel', 'Premium', 'IsPavedDrive',
'HasShed', 'Remodeled', 'RecentRemodel',
'VeryNewHouse', 'Has2ndFloor', 'HasMasVnr',
'HasWoodDeck', 'HasOpenPorch', 'HasEnclosedPorch',
'Has3SsnPorch', 'HasScreenPorch', 'TimeSinceSold',
'Residential', 'Level', 'HasShingles',
'GasFurance', 'CircuitBreaker', 'TypHomeFunc',
'Paved', 'NoFence', 'ConvWarrantyDeed',
'NormalSaleCondition', 'RegLotShape', 'BestBuyTime']
binary_features = df[dt.constructed_feat].select_dtypes(include=['int64']).columns.unique()
dt.binary_feat = binary_features.tolist()
dt.df = df
return dt
def map_ordinal_feat(dt):
"""
Ordinal features are mapped to numerical representations based on the labels of the individual columns, both the new features and old features are kept and will be fed to feature selection models
"""
df = dt.df
print 'Mapping ordinal features\n'
# MSSubClass: map digits to alphabetic labels for the type of dwelling so models don't treat it as numerical data
subclass = [20, 30, 40, 45, 50, 60, 70, 75, 80, 85, 90, 120, 150, 160, 180, 190]
alpha = list(string.ascii_uppercase)
sub_list = zip(subclass, alpha[:len(subclass)])
sub_mapping = dict(sub_list)
df['MSSubClass'] = df['MSSubClass'].map(sub_mapping)
# Slope Mapping
slope = ['Gtl', 'Mod', 'Sev']
s_list = zip(slope, xrange(0, len(slope)))
sloped_mapping = dict(s_list)
df['map_LandSlope'] = df['LandSlope'].map(sloped_mapping).astype(float)
# Exposure
exposure = ['None', 'No', 'Mn', 'Av', 'Gd']
ex_list = zip(exposure, xrange(0, len(exposure)))
ex_mapping = dict(ex_list)
df['BsmtExposure'] = df['BsmtExposure'].map(ex_mapping)
# Garage mapping
garage_mapping = {'None': 0.0, 'Unf': 1.0, 'RFn': 2.0, 'Fin': 3.0}
df['map_GarageFinish'] = df['GarageFinish'].map(garage_mapping).astype(float)
# Fence mapping
fence_mapping = {'None': 0.0, 'MnWw': 1.0, 'GdWo': 2.0, 'MnPrv': 3.0, 'GdPrv': 4.0}
df['map_Fence'] = df['Fence'].map(fence_mapping).astype(float)
df['map_Fence'].fillna(value=0, inplace=True)
ordinals_quality = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageQual', 'GarageCond', 'PoolQC']
quality = ['None', 'Po', 'Fa', 'TA', 'Gd', 'Ex']
q_list = zip(quality, xrange(0, len(quality)+1))
quality_mapping = dict(q_list)
for column in iter(ordinals_quality):
new_col = '{}'.format(column)
df[new_col] = df[column].map(quality_mapping)
df[new_col].fillna(value=0, inplace=True)
dt.mapped_feat = ['map_LandSlope', 'map_BsmtExposure', 'map_GarageFinish', 'map_Fence']
return dt
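# Editor's note (traced from the mapping code above): quality_mapping resolves to
# {'None': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}, so every quality-style
# ordinal column ends up on a common 0-5 numeric scale, with missing values
# filled as 0 ('None').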
def encode_labels(dt):
"""
Creating new dummy features from all categorical feature labels using pandas get_dummies
"""
print 'Encoding labels of nominal and ordinal features\n'
features = dt.ordinal_feat + dt.nominal_feat
remove_feat = ['OverallQual', 'OverallCond', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageQual', 'GarageCond', 'PoolQC', 'BsmtExposure']
features = [x for x in features if x not in remove_feat]
columns_before = set(dt.df.columns.unique())
# Get dummies and drop the first column
dt.df = pd.get_dummies(dt.df, columns=features, drop_first=True)
# Generate list of binary columns to exclude from standarization
columns_after = set(dt.df.columns.unique())
dt.binary_feat = dt.binary_feat + list(columns_after.difference(columns_before))
return dt
def remove_missing_features(dt):
"""
The following features do not appear in the test data set; to avoid overfitting we remove them (found by running compare_missing_values in edatools)
"""
print 'Dropping features that dont show up in training set\n'
D = compare_missing_values()
# generate column names from missing values dictionary, clean up
drop_feats = []
for k,v in D.iteritems():
if v:
[drop_feats.append([k, x]) for x in v]
drop_columns = ['{}_{}'.format(k, v) for k,v in drop_feats]
drop_columns.append('MSSubClass_M')
# Check if feature column was generated when encoding and then drop due to missing value in missing features
[dt.df.drop(column, axis=1, inplace=True) for column in drop_columns if column in dt.df.columns]
remove_binary = [x for x in drop_columns if x in dt.binary_feat]
[dt.binary_feat.remove(bin_feat) for bin_feat in remove_binary]
return dt
def preprocessing(dt):
"""
Take log1p transformations of numerical values whose absolute skew exceeds the chosen threshold to help normalize the data, then standardize it (mu = 0, sigma = 1) so the machine learning algorithms train more efficiently
"""
numeric_features = dt.continuous_feat + dt.discrete_feat + dt.ordinal_feat + ['TotSqFt', 'TotBath', 'SaleAge', 'YrSinceRemodel', 'TimeSinceSold', 'map_LandSlope', 'map_GarageFinish', 'map_Fence']
# These were encoded in encode_labels -> not numerical values
remove_feat = [ 'LotShape',
'LandContour',
'Utilities',
'LandSlope',
'BsmtFinType1',
'BsmtFinType2',
'GarageFinish',
'Fence',
'SaleType',
'SaleCondition',
'MoSold']
# Following transformations determined by plotting feature vs log1p(SalePrice) in PlotsPreTransformedData
print "Begin transformations\n"
# The following features show that although skew is big, log transformations lead to lower Pearson correlation
low_pearson_feat = ['BsmtCond', 'BsmtUnfSF', 'PoolQC']
# Special log transformation of the form log(x/mean(x)+k) applied
mean_log_feat = ['YearBuilt_log', 'MasVnrArea_log', 'BsmtFinSF1_log', 'TotalBsmtSF_log', 'GarageQual_log', 'SaleAge_log']
feat_log = ['YearBuilt', 'MasVnrArea', 'BsmtFinSF1', 'TotalBsmtSF', 'GarageQual', 'SaleAge']
k_s = [1, 1, 10, 1, 10, 1]
mean_log_dict = zip(mean_log_feat, k_s, feat_log)
for feats in mean_log_dict:
mean = dt.df[feats[2]].mean()
dt.df[feats[0]] = np.log(dt.df[feats[2]] / mean + feats[1])
# Power transformations (squared and cubed)
feats = ['BsmtQual', 'BsmtQual', 'BsmtQual', '2ndFlrSF', '2ndFlrSF', '2ndFlrSF', 'BsmtHalfBath', 'BsmtHalfBath', 'BsmtHalfBath']
power_feat = ['BsmtQual1', 'BsmtQual2', 'BsmtQual3', '2ndFlrSF', '2ndFlrSF2', '2ndFlrSF3', 'BsmtHalfBath1', 'BsmtHalfBath2', 'BsmtHalfBath3']
powers = [1, 2, 3, 1, 2, 3, 1, 2, 3]
power_dict = zip(feats, power_feat, powers)
for feats in power_dict:
dt.df[feats[1]] = np.power(dt.df[feats[0]], feats[2])
# Features to remove from skew analysis
remove_feat = remove_feat + low_pearson_feat + mean_log_feat + power_feat
# Remaining numerical features can be transformed to address skew
numeric_features = [x for x in numeric_features if x not in remove_feat]
# Transform the skewed numeric features by taking log(feature + 1).
# This will make the features more normal.
print 'Correct for data skew\n'
# Make sure we are performing transformations only on training data; we don't want data leakage of future information
skewed_features = dt.df.loc[:1459][numeric_features].apply(lambda x: skew(x.dropna().astype(float)))
# The skew cutoff is a hyperparameter that can be tuned (0.75 is most common on Kaggle); 0.6 gives the best division between whether to log-transform or not, based on the Pearson correlation coefficient as shown in the Jupyter notebook PlotsPreTransformedData
skewed_features = skewed_features[abs(skewed_features) > 0.75]
skewed_features = skewed_features.index
dt.df[skewed_features] = np.log1p(dt.df[skewed_features])
# Log transform y values
dt.y = np.log1p(dt.y)
return dt
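# --- Editor's illustrative sketch (not part of the original module) -----------
# Stand-alone version of the skew correction applied above: compute the skew of
# each numeric column of the given (training) frame and log1p-transform the
# columns whose absolute skew exceeds the chosen threshold.
def _example_log1p_skew_correction(frame, numeric_cols, threshold=0.75):
    skews = frame[numeric_cols].apply(lambda col: skew(col.dropna().astype(float)))
    skewed = skews[skews.abs() > threshold].index
    frame[skewed] = np.log1p(frame[skewed])
    return frame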
def standarization(dt):
"""
Standardize the numerical features that are not binary. Binary features aren't meaningful with respect to scale, and standardizing them would lose the interpretations that can be made from the dummy predictors
"""
print 'Standardize features\n'
from sklearn.preprocessing import StandardScaler
dt.df_pretransform = dt.df.copy()
# look to see if there is a way to mask or do something easier
features_stand = [x for x in dt.df.columns.unique() if x not in dt.binary_feat]
# Make array of remaining feature values to standardize
X_array = dt.df.loc[:1459][features_stand].get_values()
X_pred_array = dt.df.loc[1460:][features_stand].get_values()
# Standardize
scaler = StandardScaler(with_mean=True).fit(X_array)
X_scaled = scaler.transform(X_array)
X_pred_scaled = scaler.transform(X_pred_array)
# Reload onto data transporter
df_X = pd.DataFrame(data=X_scaled, index=None, columns=features_stand, copy=True)
df_X_pred = pd.DataFrame(data=X_pred_scaled, index=None, columns=features_stand, copy=True)
df_standarized = pd.concat([df_X, df_X_pred], ignore_index=True)
df_binary = dt.df[dt.binary_feat]
dt.df = pd.concat([df_standarized, df_binary], axis=1, copy=True)
dt.df_train = dt.df.loc[:1459]
dt.df_test = dt.df.loc[1460:]
# dt.X contains the training data set
dt.X = dt.df_train.get_values()
dt.X_pred = dt.df_test.get_values()
# dt.df_train.columns[dt.df_train.max() == 1] old way of getting binary features
return dt
def load_data():
"""
Load data and add constructed variables to dataframe
"""
print "Loading data to data transporter object\n"
data_location = ['../data/train.csv', '../data/test.csv']
dt = data_transporter(data_location)
dt = missing_values(dt)
dt = load_new_features(dt)
dt = map_ordinal_feat(dt)
dt = encode_labels(dt)
dt = remove_missing_features(dt)
dt = preprocessing(dt)
dt = standarization(dt)
# Features that we are going to test on
dt.features = dt.df_train.columns.tolist()
dt.n_features = dt.df_train.shape[1]
print "Done Loading Data\n"
print ""
return dt
if __name__ == '__main__':
all_data = load_data()
file_name = 'processed_data.pkl'
with open(file_name,'wb') as fileObject:
pickle.dump(all_data, fileObject)
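# --- Editor's illustrative sketch (not part of the original script) -----------
# Reloading the pickled data_transporter in a downstream modelling script;
# 'processed_data.pkl' matches the file written above.
#
#   with open('processed_data.pkl', 'rb') as fileObject:
#       dt = pickle.load(fileObject)
#   X, y, X_pred = dt.X, dt.y, dt.X_pred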
| mit |
hitszxp/scikit-learn | sklearn/tests/test_random_projection.py | 19 | 14015 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
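# --- Editor's illustrative sketch (not part of the original test module) ------
# johnson_lindenstrauss_min_dim gives the minimum number of random-projection
# components that preserves pairwise distances within a factor of (1 +/- eps);
# for n_samples=1000 and eps=0.1 it returns 5920, the target dimension quoted in
# the error message checked in test_too_many_samples_to_find_a_safe_embedding below.
def _example_jl_min_dim():
    return johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1)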
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrices should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
"""Check some statical properties of Gaussian random matrix"""
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
"""Check some statical properties of sparse random matrix"""
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
phdowling/scikit-learn | sklearn/tests/test_cross_validation.py | 27 | 41664 |
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
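# Usage sketch (added for illustration; not part of the original module):
# MockClassifier is driven through cross_val_score, optionally with the extra
# fit parameters its fit() signature accepts, e.g.
#
#     cval.cross_val_score(MockClassifier(), X, y,
#                          fit_params={'dummy_int': 42,
#                                      'callback': lambda clf: None})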
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
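# A minimal smoke test (an added sketch, not in the original suite) showing how
# the two helpers above are meant to be combined: a 3-fold KFold over 6 samples
# yields disjoint train/test splits whose test folds cover every index.
def test_check_cv_coverage_smoke():
    check_cv_coverage(cval.KFold(6, 3), expected_n_iter=3, n_samples=6)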
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
    # When n is not an integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not an integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
    # stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check that the folds keep the class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that the StratifiedShuffleSplit draws indices with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
        # classifier arguments of non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to zero/one
    # score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test splits when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
|
bsd-3-clause
|
grocsvs/grocsvs
|
src/grocsvs/datasets.py
|
1
|
4607
|
import logging
import os
import pandas
import pysam
import cStringIO as StringIO
import sys
from grocsvs import utilities
from grocsvs.utilities import get_key
class Dataset(object):
def serialize(self):
d = self.__dict__.copy()
d["type"] = self.__class__.__name__
del d["sample"]
return d
@staticmethod
def deserialize(sample, dict_):
if not isinstance(dict_, dict):
print "samples must be of type 'dict', not '{}': '{}'".format(type(dict_).__name__, dict_)
sys.exit(1)
dict_ = dict_.copy()
dataset_types = [TenXDataset, ShortFragDataset, MatePairDataset]
dataset_types = dict((x.__name__, x) for x in dataset_types)
type_ = get_key(dict_, "type", error_msg="sample") # just for type-checking
if not type_ in dataset_types:
print "ERROR: Sample type must be one of '{}'; got '{}' instead".format(dataset_types.keys(), type_)
sys.exit(1)
dataset_class = dataset_types[dict_.pop("type")]
dict_["sample"] = sample
#try:
return dataset_class(**dict_)
#except TypeError:
# print "MISSING FIELD FOR SAMPLE:", sample.name, dataset_class
# print " Fields provided:", dataset_class.__class__.__name__, dict_
# sys.exit(1)
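# Illustrative round trip (an added sketch, not part of grocsvs itself; the
# `dataset` name below is hypothetical): serialize() drops the back-reference
# to the owning sample and records the concrete class name, which
# deserialize() then uses to rebuild the right Dataset subclass:
#
#     d = dataset.serialize()        # e.g. {"type": "TenXDataset", "bam": ..., "id": ...}
#     restored = Dataset.deserialize(dataset.sample, d)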
class TenXDataset(Dataset):
def __init__(self, **kwdargs):#sample, bam, fragments, phased_fragments, id, sorted_fastqs=None):
self.sample = get_key(kwdargs, "sample", None, error_msg="TenXDataset")
self.bam = os.path.realpath(get_key(kwdargs, "bam", error_msg="TenXDataset"))
#self.fragments = get_key(kwdargs, "fragments", error_msg="TenXDataset")
#self.phased_fragments = get_key(kwdargs, "phased_fragments", error_msg="TenXDataset")
#self.sorted_fastqs = get_key(kwdargs, "sorted_fastqs", default=None, error_msg="TenXDataset")
self.id = get_key(kwdargs, "id", error_msg="TenXDataset")
self.validate()
def validate(self):
assert os.path.exists(self.bam), "missing bam file '{}' for sample '{}' and dataset '{}'".format(
self.bam, self.sample.name, self.id)
# @staticmethod
# def from_longranger_dir(self, longranger_dir):
# fragments = os.path.join(longranger_dir,
# "PHASER_SVCALLER_CS/PHASER_SVCALLER/_REPORTER/"
# "REPORT_SINGLE_PARTITION/fork0/files/fragments.h5")
# bam = os.path.join(longranger_dir,
# "PHASER_SVCALLER_CS/PHASER_SVCALLER/ATTACH_PHASING/"
# "fork0/files/phased_possorted_bam.bam")
# phased_fragments = os.path.join(longranger_dir,
# "10XSARCOMAC1/PHASER_SVCALLER_CS/PHASER_SVCALLER/"
# "_SNPINDEL_PHASER/PHASE_SNPINDELS/fork0/files/"
# "fragment_phasing.tsv.gz")
# self.validate()
# return TenXDataset(bam, fragments, phased_fragments)
# def load_phased_fragments(self, chrom=None, start=None, end=None):
# columns = ["chrom", "start_pos", "end_pos", "phase_set", "ps_start",
# "ps_end", "bc", "h0", "h1", "hmix", "unkn"]
# try:
# tabix = pysam.TabixFile(self.phased_fragments)
# s = StringIO.StringIO("\n".join(tabix.fetch(chrom, start, end)))
# frags = pandas.read_table(s)
# frags.columns = columns
# except (IOError, ValueError):
# frags = pandas.DataFrame(columns=columns)
# return frags
# def load_fragments(self, chrom=None, start=None, end=None):
# tabix = pysam.TabixFile()
# try:
# fragments = utilities.read_data_frame(self.fragments)
# goodbcs = utilities.get_good_barcodes(fragments)
# fragments = fragments.loc[fragments["bc"].isin(goodbcs)]
# # fragments = fragments.loc[fragments["num_reads"]>5]
# if chrom is not None:
# fragments = fragments.loc[fragments["chrom"]==chrom]
# return fragments
# except:
# logging.exception("Unable to load fragments from fragments file "
# "'{}'".format(self.fragments))
# raise
class ShortFragDataset(Dataset):
def __init__(self, sample, bam, id):
self.sample = sample
self.bam = os.path.realpath(bam)
self.id = id
class MatePairDataset(Dataset):
def __init__(self, sample, bam, id):
self.sample = sample
self.bam = os.path.realpath(bam)
self.id = id
|
mit
|
aaroncnb/comic_dust
|
q_interp.py
|
1
|
4409
|
# coding: utf-8
# In[1]:
import pyfits
import numpy as np
# Set up matplotlib and use a nicer set of plot parameters
get_ipython().magic(u'config InlineBackend.rc = {}')
import matplotlib
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
from astropy.io import fits
# In[2]:
####The 'helper function' suggestion, below, comes from the following very helpful Stack Overflow answer:
#########http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array -by user "eat"
def nan_helper(row):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(row), lambda z: z.nonzero()[0]
########################
#######################
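# Quick sanity check of the helper (an added sketch, not in the original
# notebook): interior NaNs get filled by row-wise linear interpolation.
demo_row = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
demo_nans, demo_idx = nan_helper(demo_row)
demo_row[demo_nans] = np.interp(demo_idx(demo_nans),
                                demo_idx(~demo_nans),
                                demo_row[~demo_nans])
print demo_row  # expected: [ 1.  2.  3.  4.  5.]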
# In[3]:
## Give the fits file name:
input_map = 'NL_SLIT1.fits'
#interpolated_map_deadfix = 'NL_SLIT1_interp_deadfix.fits'
interpolated_map_hotfix = 'NL_SLIT1_interp_hotfix.fits'
## Load the fits file into Python:
hdulist = pyfits.open(input_map)
## Get the file's data from the "HDU" structure, using the '.data' attribute:
data = hdulist[0].data
## Print the dimensions for confirmation:
print data.shape
print data.dtype.name
# In[4]:
data_dead = np.copy(data[0])
for y in range(0,240):
row = data_dead[y,:]
row_bad = np.where(row < -130.)
row[row_bad] = np.nan
nans, x= nan_helper(row)
row[nans]= np.interp(x(nans), x(~nans), row[~nans])
data_dead[y,:] = row
# In[5]:
plt.imshow(data_dead, cmap='gray')
plt.colorbar()
# In[6]:
data_hot = np.copy(data_dead)
for y in range(0,240):
for x in range(0,320):
if 74 < y < 116 and 273 < x < 284: #This is to ignore the Y-range where the strong line emission appears
continue
if data_hot[y,x] > 4000.:
data_hot[y,x] = np.nan
row = data_hot[y,:]
nans, x= nan_helper(row)
row[nans]= np.interp(x(nans), x(~nans), row[~nans])
data_hot[y,:] = row
# In[7]:
plt.imshow(data_hot, cmap='gray')
plt.colorbar()
# In[8]:
## Second pass of hot pixel interpolation - this time completely
## excluding the bright source regions and line emission.
## The process is the same as above, but with a lower threshold.
## This should interpolate all of the bad pixels which are outside of the bright regions
data_hot_2 = np.copy(data_hot)
for y in range(0,240):
for x in range(0,320):
if 78 < y < 113 or 271 < x < 289: #This is to ignore the Y-range where the strong line emission appears
continue
if data_hot_2[y,x] > 1000.:
data_hot_2[y,x] = np.nan
row = data_hot_2[y,:]
nans, x= nan_helper(row)
row[nans]= np.interp(x(nans), x(~nans), row[~nans])
data_hot_2[y,:] = row
# In[10]:
plt.imshow(data_hot_2, cmap='gray')
plt.colorbar()
# In[17]:
## Third pass of hot pixel interpolation - again completely excluding
## the bright source regions and line emission, now also skipping the left-hand columns.
## The process is the same as above, but with a lower threshold.
## This should interpolate all of the bad pixels which are outside of the bright regions
data_hot_3 = np.copy(data_hot_2)
for y in range(0,240):
for x in range(0,320):
if (78 < y < 113) or (0 < x < 41) or (270 < x < 289): #This is to ignore the Y-range where the strong line emission appears
continue
if data_hot_3[y,x] > 600.:
data_hot_3[y,x] = np.nan
row = data_hot_3[y,:]
nans, x= nan_helper(row)
row[nans]= np.interp(x(nans), x(~nans), row[~nans])
data_hot_3[y,:] = row
# In[19]:
plt.imshow(data_hot_3, cmap='gray')
plt.colorbar()
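# The hot-pixel passes above all repeat the same loop; the helper below is an
# added sketch (not part of the original analysis) showing how one pass could
# be written generically.
def interp_pass(image, threshold, skip=None):
    """Set pixels above `threshold` to NaN (unless `skip(y, x)` is True) and
    fill them by row-wise linear interpolation, as in the passes above."""
    out = np.copy(image)
    n_rows, n_cols = out.shape
    for y in range(n_rows):
        for x in range(n_cols):
            if skip is not None and skip(y, x):
                continue
            if out[y, x] > threshold:
                out[y, x] = np.nan
        row = out[y, :]
        nans, idx = nan_helper(row)
        row[nans] = np.interp(idx(nans), idx(~nans), row[~nans])
        out[y, :] = row
    return out
# For example, the third pass above is roughly equivalent to:
#     data_hot_3 = interp_pass(data_hot_2, 600.,
#                              skip=lambda y, x: (78 < y < 113) or
#                                                (0 < x < 41) or (270 < x < 289))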
# In[21]:
## Write the interpolated array to a file (keeping the header info):
data[0] = data_hot_3
#hdulist.writeto(interpolated_map_deadfix, output_verify='ignore',clobber=True)
hdulist.writeto(interpolated_map_hotfix, output_verify='ignore',clobber=True)
# In[ ]:
|
apache-2.0
|
allenlavoie/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/debug_test.py
|
40
|
32402
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)],
labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):],
labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
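# Usage sketch (added for illustration; not part of the original test file):
# the helpers above split a (features, labels) pair in half and wrap each half
# in an input_fn for the Debug estimators, mirroring the tests below, e.g.
#
#     (train_f, train_l), (test_f, test_l) = _train_test_split(
#         [features, labels])
#     classifier.fit(input_fn=_input_fn_builder(train_f, train_l), steps=50)
#     predictions = classifier.predict_classes(
#         input_fn=_input_fn_builder(test_f, None))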
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
  def testTrainSaveLoad(self):
    """Tests that a trained model can be saved and reloaded."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(language, dimension=1)
]
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
  def testRegression_NpMatrixData(self):
    """Tests regression using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
  def testTrainSaveLoad(self):
    """Tests that a trained model can be saved and reloaded."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
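

# Illustrative sketch: the tests above effectively treat DebugClassifier as a
# "predict the most frequent class" baseline and DebugRegressor as a "predict
# the mean target" baseline. The hypothetical helpers below express those two
# baselines in plain NumPy (reusing this module's existing np import).
def _majority_class_baseline(labels):
  """Most frequent label, i.e. the constant prediction a DebugClassifier makes."""
  values, counts = np.unique(labels, return_counts=True)
  return values[np.argmax(counts)]


def _mean_target_baseline(targets):
  """Column-wise mean target, i.e. the constant prediction a DebugRegressor makes."""
  return np.mean(targets, axis=0)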
|
apache-2.0
|
astroJeff/dart_board
|
paper/scripts/mock_3_low.py
|
1
|
2611
|
import sys
import numpy as np
import time
import matplotlib
matplotlib.use('Agg')
sys.path.append("../pyBSE/")
import pybse
import dart_board
from dart_board import sf_history
# Values for mock system 3
# Input values: 11.01 7.42 744.19 0.50 167.69 1.79 2.08 83.2559 -69.9377 36.59
# Output values: 1.31 7.43 53.45 0.458 36.30 1.140e-12 25.58 13 1
LMC_metallicity = 0.008
system_kwargs = {"M2" : 7.84, "M2_err" : 0.25,
"P_orb" : 14.11, "P_orb_err" : 1.0,
"ecc" : 0.47, "ecc_err" : 0.05,
"L_x" : 1.94e33, "L_x_err" : 1.0e32,
"ra" : 83.5744461 , "dec" : -69.4876344}
pub = dart_board.DartBoard("HMXB", evolve_binary=pybse.evolve,
ln_prior_pos=sf_history.lmc.prior_lmc, nwalkers=320,
threads=20, ntemps=10,
metallicity=LMC_metallicity, thin=100,
system_kwargs=system_kwargs)
# Darts need to be in ln
pub.aim_darts(N_iterations=200000, a_set='low')
start_time = time.time()
pub.throw_darts(nburn=2, nsteps=150000)
print("Simulation took",time.time()-start_time,"seconds.")
# Since emcee_PT does not have a blobs function, we must include the following calculation
if pub.chains.ndim == 4:
print("Generating derived values...")
ntemps, nchains, nsteps, nvar = pub.chains.shape
pub.derived = np.zeros(shape=(ntemps, nchains, nsteps, 9))
for i in range(ntemps):
for j in range(nchains):
for k in range(nsteps):
x_i = pub.chains[i,j,k]
ln_M1, ln_M2, ln_a, ecc, v_kick_1, theta_kick_1, phi_kick_1, ra, dec, ln_t = x_i
M1 = np.exp(ln_M1)
M2 = np.exp(ln_M2)
a = np.exp(ln_a)
time = np.exp(ln_t)
P_orb = dart_board.posterior.A_to_P(M1, M2, a)
output = pybse.evolve(M1, M2, P_orb, ecc, v_kick_1, theta_kick_1, phi_kick_1,
v_kick_1, theta_kick_1, phi_kick_1,
time, LMC_metallicity, False)
pub.derived[i,j,k] = np.array([output])
print("...finished.")
# Acceptance fraction
print("Acceptance fractions:",pub.sampler.acceptance_fraction)
# Autocorrelation length
try:
    print("Autocorrelation length:", pub.sampler.acor)
except:
print("Acceptance fraction is too low.")
# Save outputs
np.save("../data/mock_3_low_chain.npy", pub.chains)
np.save("../data/mock_3_low_derived.npy", pub.derived)
np.save("../data/mock_3_low_lnprobability.npy", pub.lnprobability)
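
# Illustrative sketch: dart_board.posterior.A_to_P, used in the loop above,
# converts an orbital separation into an orbital period via Kepler's third law.
# The helper below is a hypothetical standalone version; the unit conventions
# (separation in Rsun, masses in Msun, period returned in days) are assumptions.
def _a_to_P_days(M1, M2, a_Rsun):
    G = 6.674e-8       # gravitational constant [cm^3 g^-1 s^-2]
    Msun = 1.989e33    # solar mass [g]
    Rsun = 6.957e10    # solar radius [cm]
    a_cm = a_Rsun * Rsun
    P_sec = 2.0 * np.pi * np.sqrt(a_cm**3 / (G * Msun * (M1 + M2)))
    return P_sec / 86400.0   # seconds -> days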
|
mit
|
andyfaff/scipy
|
scipy/integrate/odepack.py
|
21
|
10740
|
# Author: Travis Oliphant
__all__ = ['odeint']
import numpy as np
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error).",
-8: "Run terminated (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0, tfirst=False):
"""
Integrate a system of ordinary differential equations.
.. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
differential equation.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t, ...) [or func(t, y, ...)]
where y can be a vector.
.. note:: By default, the required order of the first two arguments of
`func` are in the opposite order of the arguments in the system
definition function used by the `scipy.integrate.ode` class and
the function `scipy.integrate.solve_ivp`. To use a function with
the signature ``func(t, y, ...)``, the argument `tfirst` must be
set to ``True``.
Parameters
----------
func : callable(y, t, ...) or callable(t, y, ...)
Computes the derivative of y at t.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
This sequence must be monotonically increasing or monotonically
decreasing; repeated values are allowed.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t, ...) or callable(t, y, ...)
Gradient (Jacobian) of `func`.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
        If True, return a dictionary of optional outputs as the second output.
printmessg : bool, optional
Whether to print the convergence message
tfirst: bool, optional
If True, the first two arguments of `func` (and `Dfun`, if given)
        must be ``t, y`` instead of the default ``y, t``.
.. versionadded:: 1.1.0
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step
'tcur' vector with the value of t reached for each time step
(will always be at least as large as the input times)
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise
'lenrw' the length of the double work array required
'leniw' the length of integer work array required
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
        If either of these is not None, the Jacobian is assumed to be
        banded. These give the number of lower and upper non-zero
        diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g., singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
solve_ivp : solve an initial value problem for a system of ODEs
ode : a more object-oriented integrator based on VODE
quad : for finding the area under a curve
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in Python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and is initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We will generate a solution at 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
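    The same system can also be written with the ``(t, y)`` argument order
    used by `solve_ivp` by passing ``tfirst=True`` (``pend_tfirst`` below is
    just a renamed variant of ``pend``, added here for illustration):

    >>> def pend_tfirst(t, y, b, c):
    ...     theta, omega = y
    ...     return [omega, -b*omega - c*np.sin(theta)]
    ...
    >>> sol_tfirst = odeint(pend_tfirst, y0, t, args=(b, c), tfirst=True)
    >>> np.allclose(sol, sol_tfirst)
    True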
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
dt = np.diff(t)
if not((dt >= 0).all() or (dt <= 0).all()):
raise ValueError("The values in t must be monotonically increasing "
"or monotonically decreasing; repeated values are "
"allowed.")
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords,
int(bool(tfirst)))
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
|
bsd-3-clause
|
theusual/kaggle-seeclickfix-ensemble
|
Bryan/data_io.py
|
2
|
4219
|
"""
Functions for data IO
"""
__author__ = 'Bryan Gregory'
__email__ = '[email protected]'
__date__ = '09-06-2013'
#Internal modules
import utils
#Start logger to record all info, warnings, and errors to Logs/logfile.log
log = utils.start_logging(__name__)
#External modules
import json
import csv
import gc
import pandas as pd
import time
import os
from datetime import datetime
from sklearn.externals import joblib
#import JSON data into a dict
def load_json(file_path):
return [json.loads(line) for line in open(file_path)]
#import delimited flat file into a list
def load_flatfile(file_path, delimiter=''):
temp_array = []
#if no delimiter is specified, try to use the built-in delimiter detection
if delimiter == '':
csv_reader = csv.reader(open(file_path))
else:
        csv_reader = csv.reader(open(file_path), delimiter=delimiter)
for line in csv_reader:
temp_array += line
return temp_array #[line for line in csv_reader]
#import delimited flat file into a pandas dataframe
def load_flatfile_to_df(file_path, delimiter=''):
#if no delimiter is specified, try to use the built-in delimiter detection
if delimiter == '':
return pd.read_csv(file_path)
else:
        return pd.read_csv(file_path, delimiter=delimiter)
def save_predictions(df,target,model_name='',directory='Submits/',estimator_class='',note=''):
timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
filename = directory+timestamp+'--'+model_name+'_'+estimator_class+'_'+note+'.csv'
#---Perform any manual predictions cleanup that may be necessary---#
#Save predictions
try:
df[target] = [x[0] for x in df[target]]
except IndexError:
df[target] = [x for x in df[target]]
df.ix[:,['id',target]].to_csv(filename, index=False)
log.info('Submission file saved: %s' % filename)
def save_combined_predictions(df,directory,filename,note=''):
#If previous combined predictions already exist, archive existing ones by renaming to append datetime
try:
modified_date = time.strptime(time.ctime(os.path.getmtime(directory+filename)), '%a %b %d %H:%M:%S %Y')
modified_date = datetime.fromtimestamp(time.mktime(modified_date)).strftime('%m-%d-%y_%H%M')
archived_file = directory+'Archive/'+filename[:len(filename)-4]+'--'+modified_date+'.csv'
os.rename(directory+filename,archived_file)
log.info('File already exists with given filename, archiving old file to: '+ archived_file)
    except OSError:  # covers WindowsError on Windows and missing files elsewhere
pass
#Save predictions
df.to_csv(directory+filename, index=False)
log.info('Predictions saved: %s' % filename)
def save_cached_object(object, filename, directory='Cache/'):
    """Save cached objects in pickle format using joblib compression.
If a previous cached file exists, then get its modified date and append it to filename and archive it
"""
if filename[-4:] != '.pkl':
filename = filename+'.pkl'
try:
modified_date = time.strptime(time.ctime(os.path.getmtime(directory+filename)), '%a %b %d %H:%M:%S %Y')
modified_date = datetime.fromtimestamp(time.mktime(modified_date)).strftime('%m-%d-%y_%H%M')
archived_file = directory+'Archive/'+filename[:len(filename)-4]+'--'+modified_date+'.pkl'
os.rename(directory+filename,archived_file)
log.info('Cached object already exists with given filename, archiving old object to: '+ archived_file)
    except OSError:  # covers WindowsError on Windows and missing files elsewhere
pass
joblib.dump(object, directory+filename, compress=9)
log.info('New object cached to: '+directory+filename)
def load_cached_object(filename, directory='Cache/'):
if filename[-4:] != '.pkl':
filename = filename+'.pkl'
    try:
        object = joblib.load(directory+filename)
        log.info('Successfully loaded object from: '+directory+filename)
    except IOError:
        log.info('Cached object does not exist: '+directory+filename)
        object = None
    return object
def save_text_features(output_file, feature_names):
o_f = open( output_file, 'wb' )
feature_names = '\n'.join( feature_names )
o_f.write( feature_names )
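
# Illustrative sketch: a hypothetical round-trip through the cache helpers above.
# Assumes the Cache/ (and, for archiving, Cache/Archive/) directories exist.
def _example_cache_roundtrip():
    model = {'weights': [0.1, 0.2, 0.3]}   # any picklable object will do
    save_cached_object(model, 'example_model')
    return load_cached_object('example_model')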
|
bsd-3-clause
|
jcfr/mystic
|
examples/test_lorentzian2.py
|
1
|
3233
|
#!/usr/bin/env python
#
# Author: Patrick Hung (patrickh @caltech)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Same as test_lorentzian, but with n being a fitted variable.
This is MUCH faster than test_lorentzian because the cost function no
longer has to do an "integral" as an intermediate step.
"""
import pylab, matplotlib
from numpy import *
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration, VTR
from mystic.strategy import Best1Exp, Rand1Exp, Best2Exp
from mystic.monitors import Monitor
from mystic.tools import getch
from mystic.tools import random_seed
random_seed(123)
#matplotlib.interactive(True)
from mystic.models import lorentzian
F = lorentzian.ForwardFactory
def show():
import Image
pylab.savefig('test_lorentzian_out',dpi=72)
im = Image.open('test_lorentzian_out.png')
im.show()
return
def plot_sol(solver=None):
def _(params):
import signal
print "plotting params: ", params
pylab.errorbar(binsc, histo, sqrt(histo), fmt='b+')
x = arange(xmin, xmax, (0.1* binwidth))
pylab.plot(x, pdf(x)*N,'b:')
pylab.plot(x, F(params)(x)*N,'r-')
pylab.xlabel('E (GeV)')
pylab.ylabel('Counts')
try: show()
except ImportError: pylab.show()
if solver is not None:
signal.signal(signal.SIGINT, solver.signal_handler)
return _
ND = 7
NP = 50
MAX_GENERATIONS = 200
generations = 200
minrange = [0.5,30, -15, 10, 0, 0, 100]
maxrange = [2, 60., -5, 50, 2, 1, 200]
def de_solve(CF):
solver = DifferentialEvolutionSolver(ND, NP)
solver.enable_signal_handler()
stepmon = Monitor()
solver.SetRandomInitialPoints(min=minrange,max=maxrange)
solver.SetStrictRanges(min=minrange,max=maxrange)
solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
solver.SetGenerationMonitor(stepmon)
termination=ChangeOverGeneration(generations=generations)
solver.Solve(CF, termination=termination, strategy=Rand1Exp, \
sigint_callback = plot_sol(solver))
solution = solver.Solution()
return solution, stepmon
if __name__ == '__main__':
target = [1., 45., -10., 20., 1., 0.1, 120.]
from mystic.models.lorentzian import gendata, histogram
npts = 4000; binwidth = 0.1
N = npts * binwidth
xmin, xmax = 0.0, 3.0
pdf = F(target)
print "pdf(1): ", pdf(1)
data = gendata(target, xmin, xmax, npts)
    pylab.plot(data[1:int(N)], 0*data[1:int(N)], 'k.')
pylab.title('Samples drawn from density to be estimated.')
try: show()
except ImportError: pylab.show()
pylab.clf()
binsc, histo = histogram(data, binwidth, xmin,xmax)
print "binsc: ", binsc
print "count: ", histo
print "ncount: ", histo/N
print "exact : ", pdf(binsc)
print "now with DE..."
myCF = lorentzian.CostFactory2(binsc, histo/N, ND)
sol, steps = de_solve(myCF)
plot_sol()(sol)
#print "steps: ", steps.x, steps.y
# end of file
|
bsd-3-clause
|
hksonngan/pynopticon
|
src/em/examples/pdfestimation.py
|
4
|
1418
|
#! /usr/bin/env python
# Last Change: Sun Jul 22 12:00 PM 2007 J
# Example of doing pdf estimation with EM algorithm. Requires matplotlib.
import numpy as N
import pylab as P
from scikits.learn.machine.em import EM, GM, GMM
import utils
oldfaithful = utils.get_faithful()
# We want the relationship between d(t) and w(t+1), but get_faithful gives
# d(t), w(t), so we have to shift to get the "usual" faithful data
waiting = oldfaithful[1:, 1:]
duration = oldfaithful[:len(waiting), :1]
dt = N.concatenate((duration, waiting), 1)
# Scale the data so that each component is in [0..1]
dt = utils.scale(dt)
# This function trains a mixture model with k components and returns the
# trained model and the BIC
def cluster(data, k, mode = 'full'):
d = data.shape[1]
gm = GM(d, k, mode)
gmm = GMM(gm)
em = EM()
em.train(data, gmm, maxiter = 20)
return gm, gmm.bic(data)
# bc will contain a list of BIC values for each model trained
bc = []
mode = 'full'
P.figure()
for k in range(1, 5):
# Train a model of k component, and plots isodensity curve
P.subplot(2, 2, k)
gm, b = cluster(dt, k = k, mode = mode)
bc.append(b)
X, Y, Z, V = gm.density_on_grid()
P.contour(X, Y, Z, V)
P.plot(dt[:, 0], dt[:, 1], '.')
P.xlabel('duration time (scaled)')
P.ylabel('waiting time (scaled)')
print "According to the BIC, model with %d components is better" % (N.argmax(bc) + 1)
|
gpl-3.0
|
YzPaul3/h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_anomaly_deeplearning_large.py
|
8
|
1702
|
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
def anomaly():
print("Deep Learning Anomaly Detection MNIST")
train = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
test = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/test.csv.gz"))
predictors = list(range(0,784))
resp = 784
# unsupervised -> drop the response column (digit: 0-9)
train = train[predictors]
test = test[predictors]
# 1) LEARN WHAT'S NORMAL
    # train an unsupervised Deep Learning autoencoder model on the training frame
ae_model = H2OAutoEncoderEstimator(activation="Tanh", hidden=[2], l1=1e-5, ignore_const_cols=False, epochs=1)
ae_model.train(x=predictors,training_frame=train)
# 2) DETECT OUTLIERS
# anomaly app computes the per-row reconstruction error for the test data set
# (passing it through the autoencoder model and computing mean square error (MSE) for each row)
test_rec_error = ae_model.anomaly(test)
# 3) VISUALIZE OUTLIERS
# Let's look at the test set points with low/median/high reconstruction errors.
# We will now visualize the original test set points and their reconstructions obtained
# by propagating them through the narrow neural net.
# Convert the test data into its autoencoded representation (pass through narrow neural net)
test_recon = ae_model.predict(test)
# In python, the visualization could be done with tools like numpy/matplotlib or numpy/PIL
if __name__ == "__main__":
pyunit_utils.standalone_test(anomaly)
else:
anomaly()
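
# Illustrative sketch: one way the per-row reconstruction errors returned by
# ae_model.anomaly(test) could be plotted with matplotlib, as suggested in the
# comment above. The helper and the 'Reconstruction.MSE' column name are
# assumptions, not confirmed by this test.
def plot_reconstruction_errors(test_rec_error, out_png="reconstruction_errors.png"):
    import matplotlib
    matplotlib.use("Agg")  # headless backend, suitable for automated runs
    import matplotlib.pyplot as plt
    errors = test_rec_error.as_data_frame()["Reconstruction.MSE"]
    plt.hist(errors, bins=50)
    plt.xlabel("per-row reconstruction MSE")
    plt.ylabel("count")
    plt.savefig(out_png)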
|
apache-2.0
|
Stanford-Online/edx-analytics-pipeline
|
edx/analytics/tasks/common/tests/test_mysql.py
|
2
|
6726
|
"""
Ensure we can read from MySQL data sources.
"""
from __future__ import absolute_import
import datetime
import textwrap
import unittest
import luigi
from mock import MagicMock, patch, sentinel
from pandas import read_csv
from edx.analytics.tasks.common.mysql_dump import MysqlSelectTask, mysql_datetime
from edx.analytics.tasks.util.tests.config import with_luigi_config
from edx.analytics.tasks.util.tests.target import FakeTarget
from edx.analytics.tasks.util.url import ExternalURL
class ConversionTestCase(unittest.TestCase):
"""
Ensure we can reliably convert native python data types to strings.
"""
def setUp(self):
self.task = MysqlSelectTask(
credentials=sentinel.ignored,
destination=sentinel.ignored
)
def test_convert_datetime(self):
self.assert_converted_string_equals(
datetime.datetime.strptime('2014-01-02', '%Y-%m-%d').date(), '2014-01-02'
)
def assert_converted_string_equals(self, obj, expected_string):
"""
Args:
obj (mixed): Any object.
expected_string (str): The expected string representation of `obj`.
Raises:
AssertionError: iff the string resulting from converting `obj` to a string does not match the
expected string.
"""
self.assertEquals(self.task.convert(obj), expected_string)
def test_convert_integer(self):
self.assert_converted_string_equals(
10, '10'
)
def test_convert_none(self):
self.assert_converted_string_equals(
None, '-'
)
def test_convert_unicode(self):
self.assert_converted_string_equals(
u'\u0669(\u0361\u0e4f\u032f\u0361\u0e4f)\u06f6',
u'\u0669(\u0361\u0e4f\u032f\u0361\u0e4f)\u06f6'.encode('utf-8')
)
class MysqlSelectTaskTestCase(unittest.TestCase):
"""
Ensure we can connect to and read data from MySQL data sources.
"""
def setUp(self):
patcher = patch('edx.analytics.tasks.common.mysql_dump.mysql.connector')
self.mock_mysql_connector = patcher.start()
self.addCleanup(patcher.stop)
mock_conn = self.mock_mysql_connector.connect.return_value # pylint: disable=maybe-no-member
self.mock_cursor = mock_conn.cursor.return_value
# By default, emulate 0 results returned
self.mock_cursor.fetchone.return_value = None
def run_task(self, credentials=None, query=None):
"""
Emulate execution of a generic MysqlSelectTask.
"""
if not credentials:
credentials = '''\
{
"host": "db.example.com",
"port": "3306",
"username": "exampleuser",
"password": "example password"
}'''
if not query:
query = 'SELECT 1'
# Create a dummy task that simply returns the parameters given
class TestTask(MysqlSelectTask):
"""A generic MysqlSelectTask that wraps the parameters from the enclosing function"""
database = "exampledata"
@property
def query(self):
return query
@property
def filename(self):
return None # pragma: no cover
task = TestTask(
credentials=sentinel.ignored,
destination=sentinel.ignored
)
fake_input = {
'credentials': FakeTarget(value=textwrap.dedent(credentials))
}
task.input = MagicMock(return_value=fake_input)
output_target = FakeTarget()
task.output = MagicMock(return_value=output_target)
task.run()
try:
parsed = read_csv(output_target.buffer,
header=None,
sep="\t",
na_values=['-'],
encoding='utf-8')
except ValueError:
parsed = None
return parsed
def test_connect_with_missing_credentials(self):
with self.assertRaises(KeyError):
self.run_task('{}')
def test_connect_with_credential_syntax_error(self):
with self.assertRaises(ValueError):
self.run_task('{')
def test_connect_with_complete_credentials(self):
self.run_task()
def test_execute_query(self):
self.mock_cursor.fetchone.side_effect = [
(2L,),
(3L,),
(10L,),
None
]
output = self.run_task(query=sentinel.query)
self.mock_cursor.execute.assert_called_once_with(sentinel.query, tuple())
self.assertEquals(output[0][0], 2)
self.assertEquals(output[0][1], 3)
self.assertEquals(output[0][2], 10)
def test_unicode_results(self):
unicode_string = u'\u0669(\u0361\u0e4f\u032f\u0361\u0e4f)\u06f6'
self.mock_cursor.fetchone.side_effect = [
(unicode_string,),
None
]
output = self.run_task(query=sentinel.query)
self.assertEquals(output[0][0], unicode_string)
def test_default_attributes(self):
destination = 'file:///tmp/foo'
class GenericTask(MysqlSelectTask):
"""A dummy task used to ensure defaults are reasonable"""
@property
def filename(self):
return 'bar'
task = GenericTask(
credentials=sentinel.credentials,
destination=destination,
database=sentinel.database,
)
self.assertEquals(task.credentials, sentinel.credentials)
self.assertEquals(task.database, sentinel.database)
self.assertEquals(task.destination, destination)
self.assertEquals(task.query, 'SELECT 1')
self.assertEquals(task.query_parameters, tuple())
self.assertIsInstance(task.requires()['credentials'], ExternalURL)
self.assertEquals(task.requires()['credentials'].url, sentinel.credentials)
self.assertIsInstance(task.output(), luigi.LocalTarget)
self.assertEquals(task.output().path, '/tmp/foo/bar') # pylint: disable=maybe-no-member
@with_luigi_config('database-import', 'database', 'foobar')
def test_parameters_from_config(self):
t = MysqlSelectTask(credentials=sentinel.credentials, destination=sentinel.destination)
self.assertEquals(t.database, 'foobar')
def test_mysql_timestamp(self):
string_timestamp = '2014-01-02 13:10:11'
timestamp = datetime.datetime.strptime(string_timestamp, '%Y-%m-%d %H:%M:%S')
self.assertEquals(mysql_datetime(timestamp), string_timestamp)
|
agpl-3.0
|
gotomypc/scikit-learn
|
sklearn/mixture/tests/test_gmm.py
|
200
|
17427
|
import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
            return  # DPGMM does not support setting the means and
            # covariances before fitting. There is no way of fixing this
            # due to the variational parameters being more expressive than
            # covariance matrices.
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not do much worse than a single init
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
    # Test that all of the covariance_types return the same BIC score for
    # 1-dimensional, 1-component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to trigger a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed
    mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with two 2-d components. The components are unbalanced
    # (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
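

# Illustrative numerical sketch: the two covariance formulas quoted in the
# docstring of check_positive_definite_covars are algebraically equal when the
# weights sum to one; the non-centered form is simply more prone to round-off.
# The helper below (hypothetical, not used by the tests) checks the equality
# on random weighted data.
def _compare_covariance_formulas(seed=0):
    local_rng = np.random.RandomState(seed)
    x = local_rng.randn(200, 2)
    w = local_rng.rand(200)
    w /= w.sum()
    mu = np.dot(w, x)
    c_noncentered = np.dot(x.T * w, x) - np.outer(mu, mu)
    c_centered = np.dot((x - mu).T * w, (x - mu))
    return np.allclose(c_noncentered, c_centered)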
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
rafaelmartins/rst2pdf
|
setup.py
|
6
|
3121
|
# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
import os
from setuptools import setup, find_packages
version = '0.93'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('LICENSE.txt')
+ '\n' +
'Detailed Documentation\n'
'**********************\n'
+ '\n' +
read('README.txt')
+ '\n' +
'Contributors\n'
'************\n'
+ '\n' +
read('Contributors.txt')
+ '\n' +
'Change history\n'
'**************\n'
+ '\n' +
read('CHANGES.txt')
+ '\n' +
'Download\n'
'********\n'
)
install_requires = [
'setuptools',
'docutils',
'reportlab>=2.4',
'Pygments',
'pdfrw',
]
try:
import json
except ImportError:
install_requires.append('simplejson')
tests_require = ['pyPdf']
sphinx_require = ['sphinx']
hyphenation_require = ['wordaxe>=1.0']
images_require = ['PIL']
pdfimages_require = ['pyPdf','PythonMagick']
pdfimages2_require = ['pyPdf','SWFTools']
svgsupport_require = ['svg2rlg']
aafiguresupport_require = ['aafigure>=0.4']
mathsupport_require = ['matplotlib']
rawhtmlsupport_require = ['xhtml2pdf']
setup(
name="rst2pdf",
version=version,
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
package_data=dict(rst2pdf=['styles/*.json',
'styles/*.style',
'images/*png',
'images/*jpg',
'templates/*tmpl'
]),
include_package_data=True,
dependency_links=[
],
install_requires=install_requires,
tests_require=tests_require,
extras_require=dict(
tests=tests_require,
sphinx=sphinx_require,
hyphenation=hyphenation_require,
images=images_require,
pdfimages=pdfimages_require,
pdfimages2=pdfimages2_require,
svgsupport=svgsupport_require,
aafiguresupport=aafiguresupport_require,
mathsupport=mathsupport_require,
rawhtmlsupport=rawhtmlsupport_require,
),
# metadata for upload to PyPI
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing',
'Topic :: Utilities',
],
author="Roberto Alsina",
author_email="ralsina at netmanagers dot com dot ar",
description="Convert restructured text to PDF via reportlab.",
long_description=long_description,
license="MIT",
keywords="restructured convert rst pdf docutils pygments reportlab",
url="http://rst2pdf.googlecode.com",
download_url="http://code.google.com/p/rst2pdf/downloads/list",
entry_points={'console_scripts': ['rst2pdf = rst2pdf.createpdf:main']},
test_suite='rst2pdf.tests.test_rst2pdf.test_suite',
)
|
mit
|
eclee25/flu-SDI-exploratory-age
|
scripts/create_fluseverity_figs/ILINet_F3_zOR_benchmark_altnorm.py
|
1
|
8694
|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 6/19/14
###Function: mean peak-based retro zOR metric vs. CDC benchmark index, mean Thanksgiving-based early zOR metric vs. CDC benchmark index
###Import data: /home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index_long.csv, CDC_Source/Import_Data/all_cdc_source_data.csv, Census/Import_Data/totalpop_age_Census_98-14.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python ILINet_F3_zOR_benchmark_altnorm.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
# 2013-14 ILINet data is normalized by estimated population size from December 2013 because 2014 estimates are not available at this time
# 2009-10 data is removed from cdc_severity_index_long.csv
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
incidin.readline() # remove header
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# normalization scheme: pre-pandemic and post-pandemic (1997-98 through 2008-09, 2010-11 through 2013-14)
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index_long_norm1.csv','r')
ixin.readline()
ix = csv.reader(ixin, delimiter=',')
# normalization scheme: 1997-98 through 2002-03, 2003-04 through 2008-09, 2010-11 through 2013-14
ixin2 = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index_long_norm2.csv','r')
ixin2.readline()
ix2 = csv.reader(ixin2, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_ILINet_seasonlabels
fs = 24
fssml = 16
# coordinates for mild and severe text
mildretro_txtcoords, sevretro_txtcoords = fxn.gp_txt_retro_coords
mildearly_txtcoords, sevearly_txtcoords = fxn.gp_txt_early_coords
### program ###
# import data
# d_benchmark[seasonnum] = CDC benchmark index value
d_benchmark1 = fxn.benchmark_import(ix, 8) # no ILINet, norm scheme 1
d_benchmark2 = fxn.benchmark_import(ix2, 8) # no ILINet, norm scheme 2
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 100,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# dict_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# plot values
benchmark1 = [d_benchmark1[s] for s in ps]
benchmark2 = [d_benchmark2[s] for s in ps]
retrozOR = [d_classifzOR[s][0] for s in ps]
earlyzOR = [d_classifzOR[s][1] for s in ps]
print 'retro corr coef, norm 1', np.corrcoef(benchmark1, retrozOR)
print 'early corr coef, norm 1', np.corrcoef(benchmark1, earlyzOR)
print 'retro corr coef, norm 2', np.corrcoef(benchmark2, retrozOR)
print 'early corr coef, norm 2', np.corrcoef(benchmark2, earlyzOR)
# normalization scheme 1: plots
# pre and post-pandemic normalization scheme
# mean retro zOR vs. benchmark index
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
ax1.plot(benchmark1, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
ax1.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax1.hlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax1.fill([-6, -1, -1, -6], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax1.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax1.fill([1, 10, 10, 1], [-1, -1, -20, -20], facecolor='red', alpha=0.4)
ax1.annotate('Mild', xy=mildretro_txtcoords, fontsize=fssml)
ax1.annotate('Severe', xy=sevretro_txtcoords, fontsize=fssml)
for s, x, y in zip(sl, benchmark1, retrozOR):
ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax1.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax1.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax1.tick_params(axis='both',labelsize=fssml)
ax1.set_xlim([-6,10])
ax1.set_ylim([-20,20])
ax1.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/ILINet/all_ILINet/zOR_benchmark_norm1.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# mean early warning zOR vs. benchmark index
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
ax2.plot(benchmark1, earlyzOR, marker = 'o', color = 'black', linestyle = 'None')
ax2.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax2.hlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax2.fill([-6, -1, -1, -6], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax2.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax2.fill([1, 10, 10, 1], [-1, -1, -20, -20], facecolor='red', alpha=0.4)
ax2.annotate('Mild', xy=mildearly_txtcoords, fontsize=fssml)
ax2.annotate('Severe', xy=sevearly_txtcoords, fontsize=fssml)
for s, x, y in zip(sl, benchmark1, earlyzOR):
ax2.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax2.set_ylabel(fxn.gp_sigma_w, fontsize=fs)
ax2.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax2.tick_params(axis='both', labelsize=fssml)
ax2.set_xlim([-6,6])
ax2.set_ylim([-8,8])
ax2.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/ILINet/all_ILINet/zOR_benchmark_early_norm1.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# normalization scheme 2: plots
# 97-03, 03-09, 10-14 norm scheme
# mean retro zOR vs. benchmark index
fig3 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
ax3.plot(benchmark2, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
ax3.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax3.hlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax3.fill([-6, -1, -1, -6], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax3.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax3.fill([1, 10, 10, 1], [-1, -1, -20, -20], facecolor='red', alpha=0.4)
ax3.annotate('Mild', xy=mildretro_txtcoords, fontsize=fssml)
ax3.annotate('Severe', xy=sevretro_txtcoords, fontsize=fssml)
for s, x, y in zip(sl, benchmark2, retrozOR):
ax3.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax3.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax3.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax3.tick_params(axis='both',labelsize=fssml)
ax3.set_xlim([-6,10])
ax3.set_ylim([-20,20])
ax3.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/ILINet/all_ILINet/zOR_benchmark_norm2.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# mean early warning zOR vs. benchmark index
fig4 = plt.figure()
ax4 = fig4.add_subplot(1,1,1)
ax4.plot(benchmark2, earlyzOR, marker = 'o', color = 'black', linestyle = 'None')
ax4.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax4.hlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax4.fill([-6, -1, -1, -6], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax4.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax4.fill([1, 10, 10, 1], [-1, -1, -20, -20], facecolor='red', alpha=0.4)
ax4.annotate('Mild', xy=mildearly_txtcoords, fontsize=fssml)
ax4.annotate('Severe', xy=sevearly_txtcoords, fontsize=fssml)
for s, x, y in zip(sl, benchmark2, earlyzOR):
ax4.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax4.set_ylabel(fxn.gp_sigma_w, fontsize=fs)
ax4.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax4.tick_params(axis='both', labelsize=fssml)
ax4.set_xlim([-6,6])
ax4.set_ylim([-8,8])
ax4.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/ILINet/all_ILINet/zOR_benchmark_early_norm2.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
|
mit
|
weegreenblobbie/mplapp
|
mplapp/button.py
|
1
|
2966
|
# Python
import colorsys
import time
from matplotlib.colors import ColorConverter
from mplapp.label import Label
class Button(Label):
"""
A push button.
"""
def __init__(self, width, height, text, callback = None, **kwargs):
if callback and not callable(callback):
raise ValueError("callback isn't callable!")
self._callback = callback
ec = 'black'
if 'ec' not in kwargs and 'edgecolor' not in kwargs:
kwargs['ec'] = ec
super(Button, self).__init__(width, height, text, **kwargs)
self._cid = None
def is_enabled(self):
return self._cid is not None
def enable(self):
if self._cid: # already enabled?
return
# set enabled colors
ec, fc, text_color = self._colors
ax = self._axes
ax.set_axis_bgcolor(fc)
for side in ['bottom', 'top', 'left', 'right']:
ax.spines[side].set_color(ec)
self._text.set_color(text_color)
self._axes.figure.canvas.draw()
self._cid = self._axes.figure.canvas.mpl_connect(
'button_press_event', self._blink_on_click)
def disable(self):
if self._cid is None: # already disabled?
return
self._axes.figure.canvas.mpl_disconnect(self._cid)
self._cid = None
# set disabled colors
ax = self._axes
grey65 = [1.0 - 0.65] * 3
grey45 = [1.0 - 0.45] * 3
        grey20 = [1.0 - 0.20] * 3
        ax.set_axis_bgcolor(grey20)
for side in ['bottom', 'top', 'left', 'right']:
ax.spines[side].set_color(grey45)
self._text.set_color(grey65)
self._axes.figure.canvas.draw()
def _render(self, fig, x, y):
super(Button, self)._render(fig, x, y)
self._cid = self._axes.figure.canvas.mpl_connect(
'button_press_event',
self._blink_on_click
)
def _blink_on_click(self, event):
"""
        'blink' the axis color to give visual feedback that the button has
        been pressed.
Reference: http://stackoverflow.com/a/1165145/562106
"""
if event.inaxes != self._axes:
return
orig_color = self._axes.get_axis_bgcolor()
r,g,b = ColorConverter().to_rgb(orig_color)
cmax = max([r,g,b])
cmin = min([r,g,b])
        # near-grey? (RGB components are floats in [0, 1], so use a small threshold)
        if abs(cmax - cmin) < 0.05:
if cmax > 0.5:
r,g,b = 0.10,0.10,0.10
else:
r,g,b = 0.90,0.90,0.90
h,l,s = colorsys.rgb_to_hls(r,g,b)
        # invert hue (colorsys uses hue in [0, 1], not degrees)
        h = 1.0 - h
new_color = colorsys.hls_to_rgb(h,l,s)
self._axes.set_axis_bgcolor(new_color)
self._axes.figure.canvas.draw()
time.sleep(0.05)
self._axes.set_axis_bgcolor(orig_color)
self._axes.figure.canvas.draw()
if self._callback:
self._callback(event)
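# Illustrative usage sketch (not part of the original module); how the button
# is placed into a figure is handled by the surrounding mplapp layout code and
# is omitted here:
#
#     def on_click(event):
#         print('button pressed')
#
#     b = Button(2.0, 0.5, 'Press me', callback=on_click)
#     b.disable()   # grey out the button; clicks are ignored
#     b.enable()    # restore colors and reconnect the click handler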
|
mit
|
BhallaLab/moose
|
moose-core/python/moose/helper.py
|
4
|
2432
|
"""helper.py:
Some helper functions which are compatible with both python2 and python3.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2017-, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import os
import re
import subprocess
def execute(cmd):
"""execute: Execute a given command.
:param cmd: string, given command.
Return:
------
Return a iterator over output.
"""
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
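# Illustrative usage sketch (not part of the original module): stream the
# output of a long-running command line by line as it is produced, e.g.
#
#     for line in execute(['ping', '-c', '3', 'localhost']):
#         sys.stdout.write(line)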
def find_files( dirname, ext=None, name_contains=None, text_regex_search=None):
files = []
for d, sd, fs in os.walk(dirname):
for f in fs:
fpath = os.path.join(d,f)
include = True
if ext is not None:
if f.split('.')[-1] != ext:
include = False
if name_contains:
if name_contains not in os.path.basename(f):
include = False
if text_regex_search:
with open(fpath, 'r' ) as f:
txt = f.read()
if re.search(text_regex_search, txt) is None:
include = False
if include:
files.append(fpath)
return files
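# Illustrative usage sketch (not part of the original module; the directory and
# regex are made up): collect all '.py' files under 'moose' whose text mentions
# 'compartment'.
#
#     pyfiles = find_files('moose', ext='py', text_regex_search=r'compartment')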
# Matplotlib text for running simulation. It makes sure that each figure is
# saved to an individual png file.
matplotlibText = """
print( '>>>> saving all figures')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def multipage(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
fig.savefig(pp, format='pdf')
pp.close()
def saveall(prefix='results', figs=None):
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for i, fig in enumerate(figs):
outfile = '%s.%d.png' % (prefix, i)
fig.savefig(outfile)
print( '>>>> %s saved.' % outfile )
plt.close()
try:
saveall()
except Exception as e:
print( '>>>> Error in saving: %s' % e )
quit(0)
"""
|
gpl-3.0
|
Hiyorimi/scikit-image
|
doc/examples/features_detection/plot_multiblock_local_binary_pattern.py
|
9
|
2603
|
"""
===========================================================
Multi-Block Local Binary Pattern for texture classification
===========================================================
This example shows how to compute multi-block local binary pattern (MB-LBP)
features as well as how to visualize them.
The features are calculated similarly to local binary patterns (LBPs), except
that summed blocks are used instead of individual pixel values.
MB-LBP is an extension of LBP that can be computed on multiple scales in
constant time using the integral image. 9 equally-sized rectangles are used to
compute a feature. For each rectangle, the sum of the pixel intensities is
computed. Comparisons of these sums to that of the central rectangle determine
the feature, similarly to LBP (See `LBP <plot_local_binary_pattern.html>`_).
First, we generate an image to illustrate the functioning of MB-LBP: consider
a (9, 9) rectangle and divide it into (3, 3) blocks, upon which we then apply
MB-LBP.
"""
from __future__ import print_function
from skimage.feature import multiblock_lbp
import numpy as np
from numpy.testing import assert_equal
from skimage.transform import integral_image
# Create test matrix where first and fifth rectangles starting
# from top left clockwise have greater value than the central one.
test_img = np.zeros((9, 9), dtype='uint8')
test_img[3:6, 3:6] = 1
test_img[:3, :3] = 50
test_img[6:, 6:] = 50
# First and fifth bits should be filled. This correct value will
# be compared to the computed one.
correct_answer = 0b10001000
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)
assert_equal(correct_answer, lbp_code)
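######################################################################
# Added aside (not part of the original example): going by the comments above,
# the 8 bits of the code map to the 8 outer rectangles, most-significant bit
# first, enumerated clockwise from the top-left block, so the code can be
# decoded as follows.
brighter_than_center = [bool(lbp_code & (1 << (7 - i))) for i in range(8)]
print(brighter_than_center)  # True for rectangles 0 (top-left) and 4 (bottom-right)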
######################################################################
# Now let's apply the operator to a real image and see how the visualization
# works.
from skimage import data
from matplotlib import pyplot as plt
from skimage.feature import draw_multiblock_lbp
test_img = data.coins()
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 90, 90)
img = draw_multiblock_lbp(test_img, 0, 0, 90, 90,
lbp_code=lbp_code, alpha=0.5)
plt.imshow(img, interpolation='nearest')
plt.show()
######################################################################
# On the above plot we see the result of computing an MB-LBP and a
# visualization of the computed feature. The rectangles whose summed intensity
# is lower than that of the central rectangle are marked in cyan; the ones
# with a higher sum are marked in white. The central rectangle is left
# untouched.
|
bsd-3-clause
|
Sentient07/scikit-learn
|
examples/preprocessing/plot_function_transformer.py
|
158
|
1993
|
"""
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
|
bsd-3-clause
|
joernhees/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py
|
103
|
2017
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
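# One possible fill-in (added sketch; not part of the original skeleton -- the
# official answer lives in the companion 'solutions' directory): a character
# n-gram TF-IDF vectorizer feeding a Perceptron, stored in `clf` and producing
# `y_predicted` as the code below expects.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)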
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
hongguangguo/shogun
|
examples/undocumented/python_modular/graphical/classifier_perceptron_graphical.py
|
26
|
2311
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
from modshogun import RealFeatures, BinaryLabels
from modshogun import Perceptron
from modshogun import MSG_INFO
# 2D data
_DIM = 2
# To get the nice message that the perceptron has converged
dummy = BinaryLabels()
dummy.io.set_loglevel(MSG_INFO)
np.random.seed(seed)
# Produce some (probably) linearly separable training data by hand
# Two Gaussians at a far enough distance
X = np.array(np.random.randn(_DIM,n))+distance
Y = np.array(np.random.randn(_DIM,n))
label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
fm_train_real = np.hstack((X,Y))
feats_train = RealFeatures(fm_train_real)
labels = BinaryLabels(label_train_twoclass)
perceptron = Perceptron(feats_train, labels)
perceptron.set_learn_rate(learn_rate)
perceptron.set_max_iter(max_iter)
perceptron.set_initialize_hyperplane(False)
# Find limits for visualization
x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
y_max = max(np.max(X[1,:]), np.max(Y[1,:]))
for i in xrange(nperceptrons):
# Initialize randomly weight vector and bias
perceptron.set_w(np.random.random(2))
perceptron.set_bias(np.random.random())
# Run the perceptron algorithm
perceptron.train()
# Construct the hyperplane for visualization
# Equation of the decision boundary is w^T x + b = 0
b = perceptron.get_bias()
w = perceptron.get_w()
hx = np.linspace(x_min-1,x_max+1)
        hy = -(w[0]*hx + b) / w[1]
        plt.plot(hx, hy)
# Plot the two-class data
plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')
# Customize the plot
plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
plt.title('Rosenblatt\'s Perceptron Algorithm')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
return perceptron
if __name__=='__main__':
print('Perceptron graphical')
classifier_perceptron_graphical(*parameter_list[0])
|
gpl-3.0
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/examples/ex_generic_mle.py
|
3
|
16380
|
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
"""
Log likelihood for the probit
"""
q = 2*endog - 1
X = exog
return np.add.reduce(stats.norm.logcdf(q*np.dot(X,params)))
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog,data.exog), maxiter=500)
print res
#np.allclose(res.params, probit_res.params)
print res.params, probit_res.params
#datal = sm.datasets.longley.load()
datal = sm.datasets.ccard.load()
datal.exog = sm.add_constant(datal.exog, prepend=False)
# Instance of GenericLikelihood model doesn't work directly, because loglike
# cannot get access to data in self.endog, self.exog
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)
show_error = False
show_error2 = 1#False
if show_error:
def loglike_norm_xb(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(method="nm", maxiter = 500)
print res_norm.params
if show_error2:
def loglike_norm_xb(params, endog, exog):
beta = params[:-1]
sigma = params[-1]
#print exog.shape, beta.shape
xb = np.dot(exog, beta)
#print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
method="nm", maxiter = 5000,
fargs=(datal.endog, datal.exog))
print res_norm.params
class MygMLE(GenericLikelihoodModel):
# just for testing
def loglike(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma).sum()
def loglikeobs(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
print res_norm2.params
res2 = sm.OLS(datal.endog, datal.exog).fit()
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
retall=0)
print start_params
print res_norm3.params
print res2.bse
#print res_norm3.bse # not available
print 'llf', res2.llf, res_norm3.llf
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)
#fprime in fit option cannot be overwritten, set to None, when score is defined
# exception is fixed, but I don't think score was supposed to be called
'''
>>> mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None, maxiter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 316, in fit
disp=disp, retall=retall, callback=callback)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
710, in fmin_bfgs
gfk = myfprime(x0)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
103, in function_wrapper
return function(x, *args)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 240, in <lambda>
score = lambda params: -self.score(params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
return approx_fprime1(params, self.nloglike)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()
'''
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
maxiter = 500, retall=0)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
print np.linalg.eigh(hh)
grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print grad
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print (gradb+gradf)/2.
print res_norm3.model.score(res_norm3.params)
print res_norm3.model.score(start_params)
mod_norm2.loglike(start_params/2.)
print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
print np.sqrt(np.diag(res_bfgs.cov_params()))
print res_norm3.bse
print "MLE - OLS parameter estimates"
print res_norm3.params[:-1] - res2.params
print "bse diff in percent"
print (res_norm3.bse[:-1] / res2.bse)*100. - 100
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 993, in conf_int
lower = self.params - dist.ppf(1-alpha/2,self.model.df_resid) *\
AttributeError: 'MygMLE' object has no attribute 'df_resid'
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
print res_norm3.model.jac(res_norm3.params).shape
jac = res_norm3.model.jac(res_norm3.params)
print np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params
jac2 = res_norm3.model.jac(res_norm3.params, centered=True)
print np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac))))
print res_norm3.bse
print res2.bse
|
apache-2.0
|
nhejazi/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
|
104
|
3139
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
jtorrents/networkx
|
doc/make_gallery.py
|
12
|
2477
|
#!/usr/bin/env python
# generate a thumbnail gallery of examples
template = """\
{%% extends "layout.html" %%}
{%% set title = "Gallery" %%}
{%% block body %%}
<h3>Click on any image to see source code</h3>
<br/>
%s
{%% endblock %%}
"""
link_template = """\
<a href="%s"><img src="%s" border="0" alt="%s"/></a>
"""
import os, glob, re, shutil, sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot
import matplotlib.image
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
examples_source_dir = '../examples/drawing'
examples_dir = 'examples/drawing'
template_dir = 'source/templates'
static_dir = 'source/static/examples'
pwd=os.getcwd()
rows = []
if not os.path.exists(static_dir):
os.makedirs(static_dir)
os.chdir(examples_source_dir)
all_examples=sorted(glob.glob("*.py"))
# check for out of date examples
stale_examples=[]
for example in all_examples:
png=example.replace('py','png')
png_static=os.path.join(pwd,static_dir,png)
if (not os.path.exists(png_static) or
os.stat(png_static).st_mtime < os.stat(example).st_mtime):
stale_examples.append(example)
for example in stale_examples:
print example,
png=example.replace('py','png')
matplotlib.pyplot.figure(figsize=(6,6))
stdout=sys.stdout
sys.stdout=open('/dev/null','w')
try:
execfile(example)
sys.stdout=stdout
print " OK"
except ImportError,strerr:
sys.stdout=stdout
sys.stdout.write(" FAIL: %s\n"%strerr)
continue
matplotlib.pyplot.clf()
im=matplotlib.image.imread(png)
fig = Figure(figsize=(2.5, 2.5))
canvas = FigureCanvas(fig)
    ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[])
# basename, ext = os.path.splitext(basename)
ax.imshow(im, aspect='auto', resample=True, interpolation='bilinear')
thumbfile=png.replace(".png","_thumb.png")
fig.savefig(thumbfile)
shutil.copy(thumbfile,os.path.join(pwd,static_dir,thumbfile))
shutil.copy(png,os.path.join(pwd,static_dir,png))
basename, ext = os.path.splitext(example)
link = '%s/%s.html'%(examples_dir, basename)
rows.append(link_template%(link, os.path.join('_static/examples',thumbfile), basename))
os.chdir(pwd)
fh = open(os.path.join(template_dir,'gallery.html'), 'w')
fh.write(template%'\n'.join(rows))
fh.close()
|
bsd-3-clause
|
mmoussallam/bird
|
demo_meg_denoising.py
|
1
|
2805
|
# Authors: Alexandre Gramfort <[email protected]>
# Manuel Moussallam <[email protected]>
#
# License: BSD (3-clause)
from scipy import linalg
from meeg_tools import simu_meg
from bird import bird, s_bird
from joblib import Memory
if __name__ == '__main__':
white = False # change to True/False for white/pink noise
scales = [8, 16, 32, 64, 128]
n_runs = 30
# Structured sparsity parameters
n_channels = 20 # Set this value to 20 to reproduce figures from the paper
p_active = 1.
random_state = 42
# Reference true data
# Note : due to some changes in MNE, simulated data is no longer
# parameterized using explicit SNR values, but rather using the NAVE parameter
# Look up some documentation there: https://mne.tools/dev/generated/mne.simulation.simulate_evoked.html#mne.simulation.simulate_evoked
seed = 42
evoked_no_noise = simu_meg(nave=10000, white=True, seed=seed)
single_no_noise = evoked_no_noise.data[:n_channels, :]
# noisy simulation : to simulate a SNR of approximately 10
# we use 10 times less averaged epochs (nave parameter set to 2000)
evoked_noise = simu_meg(nave=2000, white=white, seed=seed)
single_noise = evoked_noise.data[:n_channels, :]
    n_jobs = 1  # set to -1 to run in parallel
memory = Memory(None)
p_above = 1e-8
bird_estimate = bird(single_noise, scales, n_runs, p_above=p_above,
random_state=random_state, n_jobs=n_jobs,
memory=memory)
sbird_estimate = s_bird(single_noise, scales, n_runs, p_above=p_above,
p_active=p_active, random_state=random_state,
n_jobs=n_jobs, memory=memory)
print("RMSE BIRD : %s" % linalg.norm(bird_estimate - single_noise))
print("RMSE S-BIRD : %s" % linalg.norm(sbird_estimate - single_noise))
subset = range(1, n_channels, 2)
start = 100 # make time start at 0
import matplotlib.pyplot as plt
plt.figure(figsize=(7, 5))
p1 = plt.plot(1e3 * evoked_no_noise.times[start:],
single_noise[subset, start:].T, 'k:', alpha=0.5)
p2 = plt.plot(1e3 * evoked_no_noise.times[start:],
single_no_noise[subset, start:].T, 'r:', linewidth=1.5)
p3 = plt.plot(1e3 * evoked_no_noise.times[start:],
bird_estimate[subset, start:].T, 'k-', linewidth=1.5)
p4 = plt.plot(1e3 * evoked_no_noise.times[start:],
sbird_estimate[subset, start:].T, 'm-', linewidth=1.5)
plt.legend((p1[0], p2[0], p3[0], p4[0]),
('Noisy', 'Clean', 'BIRD Estimates', 'S-BIRD Estimates'),
loc='upper right')
plt.xlabel('Time (ms)')
plt.ylabel('MEG')
plt.ylim([-1.5e-12, 2.5e-12])
plt.show()
|
bsd-3-clause
|
fyffyt/scikit-learn
|
benchmarks/bench_plot_fastkmeans.py
|
294
|
4676
|
from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label.lower()]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label.lower()]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
        if 'speed' in label.lower():
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
|
bsd-3-clause
|
jasset75/py-boletus
|
test/performance_test.py
|
1
|
1568
|
from datetime import datetime
import pandas as pd
import numpy as np
from tqdm import tqdm
stats = {}
def draw_to_str(draw, sep='_'):
nums = np.asarray(draw).tolist()
return sep.join(map(str,nums))
def check_draw(df_historical, draw, sort=True):
df = df_historical.copy()
for i, row in tqdm(enumerate(df.values), total=df.shape[0], desc='historical'):
s_row = set(row[1:7])
s_draw = set(draw)
success = len(s_draw.intersection(s_row))
df.at[i, 'draw'] = draw_to_str(draw)
df.at[i, 'success'] = success
df.at[i, 'comp'] = row[7] in s_draw
return df[df['success'] > 2]
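# Illustrative usage sketch (not part of the original test; the numbers are a
# made-up draw): check one draw against the historical results and look at the
# rows with three or more matching numbers.
#
#     hits = check_draw(df_historical, [1, 7, 13, 24, 38, 45])
#     print(hits[['FECHA', 'draw', 'success', 'comp']])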
if __name__ == '__main__':
f_test = r'/home/apollo/work/py-boletus/test/oh_fortuna.csv'
df_test = pd.read_csv(f_test, names=['N1', 'N2', 'N3', 'N4', 'N5', 'N6'])
f_historical = r'/home/apollo/work/py-boletus/data/ES-bonoloto.csv'
df_historical = pd.read_csv(f_historical, parse_dates=['FECHA'])
f_out = '/home/apollo/work/py-boletus/out/{0}_{1}_fortuna_{2}.csv'
df_total = pd.DataFrame()
for i, draw in tqdm(enumerate(df_test.values), total=df_test.shape[0], desc='draws'):
df_parcial = check_draw(df_historical, draw, sort=False)
if df_total.empty:
df_total = df_parcial.copy()
else:
df_total = df_total.append(df_parcial, sort=False)
df_total.sort_values(by=['success', 'comp', 'FECHA'], ascending=False, inplace=True)
max_num_success = df_total['success'].max()
df_total.to_csv(f_out.format(max_num_success, 'boletus', 'M'))
|
mit
|
debsankha/networkx
|
examples/graph/unix_email.py
|
62
|
2683
|
#!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of a MultiDiGraph to hold edge data
of arbitrary Python objects (in this case email Message objects).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, e.g.
python unix_email.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2005 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
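    # Added note (not in the original example): because G is a MultiDiGraph, a
    # sender/recipient pair that appears in several messages gets one parallel
    # edge per message, each edge carrying its own Message object in the
    # 'message' attribute.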
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
|
bsd-3-clause
|
akhilaananthram/nupic.research
|
sequence_prediction/continuous_sequence/comparePerformance.py
|
1
|
4889
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv, sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from optparse import OptionParser
from swarm_runner import SwarmRunner
rcParams.update({'figure.autolayout': True})
plt.ion()
plt.close('all')
def loadDataFile(filePath):
  with open(filePath, 'r') as f:
    ncol = len(next(csv.reader(f)))  # Read first line and count columns
if ncol == 1:
data = pd.read_csv(filePath, header=0, skiprows=[1,2], names=['value'])
else:
data = pd.read_csv(filePath, header=0, skiprows=[1,2], names=['step', 'value'])
data = data['value'].astype('float').tolist()
data = np.array(data)
return data
def NRMSE(data, pred):
return np.sqrt(np.nanmean(np.square(pred-data)))/np.sqrt(np.nanmean( np.square(data-np.nanmean(data))))
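# Added note (not in the original file): this NRMSE divides the RMSE of the
# prediction by the standard deviation of the data, so a score of 1.0
# corresponds to simply predicting the (constant) mean of the series.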
def plotPerformance(dataSet, nTrain):
filePath = './data/' + dataSet + '.csv'
print "load test data from ", filePath
trueData = loadDataFile(filePath)
filePath = './prediction/' + dataSet + '_TM_pred.csv'
print "load TM prediction from ", filePath
predData_TM = loadDataFile(filePath)
# filePath = './prediction/' + dataSet + '_ARIMA_pred_cont.csv'
# predData_ARIMA = loadDataFile(filePath)
N = min(len(predData_TM), len(trueData))
print "nTrain: ", nTrain
print "nTest: ", len(trueData[nTrain:])
TM_lag = 1
predData_shift = np.roll(trueData, 1)
predData_TM = np.roll(predData_TM, TM_lag)
trueData = trueData[nTrain:]
predData_TM = predData_TM[nTrain:]
predData_shift = predData_shift[nTrain:]
# predData_ARIMA = predData_ARIMA[lag:N]
NRMSE_TM = NRMSE(trueData, predData_TM)
# NRMSE_ARIMA = NRMSE(trueData, predData_ARIMA)
NRMSE_Shift = NRMSE(trueData, predData_shift)
resTM = abs(trueData-predData_TM)
res_shift = abs(trueData-predData_shift)
resTM = resTM[np.isnan(resTM) == False]
res_shift = res_shift[np.isnan(res_shift) == False]
print "NRMSE: Shift", NRMSE_Shift
print "NRMSE: TM", NRMSE_TM
# print "NRMSE: ARIMA", NRMSE_ARIMA
plt.figure(1)
plt.plot(trueData, label='True Data')
plt.plot(predData_shift, label='Trivial NRMSE: '+"%0.3f" % NRMSE_Shift)
# plt.plot(predData_ARIMA, label='ARIMA NRMSE: '+"%0.3f" % NRMSE_ARIMA)
plt.plot(predData_TM, label='TM, NRMSE: '+"%0.3f" % NRMSE_TM)
plt.legend()
plt.xlabel('Time')
fileName = './result/'+dataSet+"modelPrediction.pdf"
print "save example prediction trace to ", fileName
plt.savefig(fileName)
plt.figure(2)
xl = [0, max(max(resTM), max(res_shift))]
plt.subplot(2,2,1)
plt.hist(resTM)
plt.title('TM median='+"%0.3f" % np.median(resTM)+' NRMSE: '+"%0.3f" % NRMSE_TM)
plt.xlim(xl)
plt.xlabel("|residual|")
plt.subplot(2,2,3)
plt.hist(res_shift)
plt.title('Trivial median='+"%0.3f" % np.median(res_shift)+' NRMSE: '+"%0.3f" % NRMSE_Shift)
plt.xlim(xl)
plt.xlabel("|residual|")
fileName = './result/'+dataSet+"error_distribution.pdf"
print "save residual error distribution to ", fileName
plt.savefig(fileName)
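# The "trivial" baseline above is a one-step persistence predictor built with
# np.roll: each value is predicted by the previous observation. np.roll wraps
# around, so the first element after the roll is bogus; the script discards it
# by slicing off the first nTrain samples before scoring. Sketch with toy data:
# >>> np.roll(np.array([1, 2, 3, 4]), 1)
# array([4, 1, 2, 3])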
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default=0,
dest="dataSet",
help="DataSet Name, choose from sine, SantaFe_A, MackeyGlass")
(options, args) = parser.parse_args(sys.argv[1:])
return options, args
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
SWARM_CONFIG = SwarmRunner.importSwarmDescription(dataSet)
nTrain = SWARM_CONFIG["streamDef"]['streams'][0]['last_record']
print 'Compare Model performance for ', dataSet
plotPerformance(dataSet, nTrain)
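# Example invocation (assumes the ./data, ./prediction and ./result directories
# and the corresponding CSV files already exist; the dataset name is only an
# illustration):
#   python comparePerformance.py -d sine
# This reads ./data/sine.csv and ./prediction/sine_TM_pred.csv and writes the
# prediction-trace and residual-histogram PDFs into ./result/.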
|
gpl-3.0
|
cowlicks/numpy
|
numpy/lib/twodim_base.py
|
83
|
26903
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
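# Illustration of _min_int: it returns the smallest signed integer dtype whose
# range covers [low, high], falling back to int64.
# >>> _min_int(0, 100) is int8 # 100 <= 127
# True
# >>> _min_int(0, 200) is int16 # 200 > int8.max
# True
# >>> _min_int(-40000, 0) is int32 # -40000 < int16.min
# True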
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
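# How the assignment above places the diagonal: in a row-major (N, M) array the
# element (r, c) has flat index r*M + c, so stepping the flat view with stride
# M + 1 moves one row down and one column to the right. Starting at flat index
# k (or at the start of row -k for negative k) therefore fills exactly the k-th
# diagonal, and the m[:M-k] slice stops before rows where that diagonal has
# left the array. For example:
# >>> np.eye(3, 4, k=1, dtype=int)
# array([[0, 1, 0, 0],
#        [0, 0, 1, 0],
#        [0, 0, 0, 1]])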
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
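# The flat-index arithmetic above: element (row, col) of the (n, n) result has
# flat index row*n + col. For k >= 0 the i-th value sits at (i, i+k), giving
# fi = i + k + i*n; for k < 0 it sits at (i-k, i), giving fi = i + (i-k)*n.
# >>> np.diagflat([1, 2], -1)
# array([[0, 0, 0],
#        [1, 0, 0],
#        [0, 2, 0]])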
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
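# The mask above is built with an outer comparison: entry (i, j) of the result
# is ``i >= j - k``, i.e. ``j <= i + k``, which is exactly the lower triangle
# shifted by k. For example:
# >>> np.tri(3, k=1, dtype=int)
# array([[1, 1, 0],
#        [1, 1, 1],
#        [1, 1, 1]])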
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
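# The construction above avoids computing explicit powers: the first column of
# the increasing-order view is set to 1, the remaining columns are all set to
# x, and multiply.accumulate along axis 1 turns them into x, x**2, x**3, ...
# >>> np.vander([2, 3], 3, increasing=True)
# array([[1, 2, 4],
#        [1, 3, 9]])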
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
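# Small example for tril_indices_from (the docstring above has none): the
# indices are taken from the array's own, possibly non-square, shape.
# >>> a = np.arange(6).reshape(2, 3)
# >>> np.tril_indices_from(a)
# (array([0, 1, 1]), array([0, 0, 1]))
# >>> a[np.tril_indices_from(a)]
# array([0, 3, 4])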
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
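# Companion example for triu_indices_from, analogous to the tril case above:
# >>> a = np.arange(6).reshape(2, 3)
# >>> np.triu_indices_from(a)
# (array([0, 0, 0, 1, 1]), array([0, 1, 2, 1, 2]))
# >>> a[np.triu_indices_from(a)]
# array([0, 1, 2, 4, 5])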
|
bsd-3-clause
|
geeklhem/pimad
|
bin/figures.py
|
1
|
6889
|
from __future__ import division
import os
import matplotlib.pyplot as plt
import numpy as np
import cPickle as pickle
import sys
import platform
import time
from pimad import pip
from pimad import invasion
from pimad import draw
from pimad.models.toycontinuous import ToyContinuous, ToyContinuousNLC, ToyContinuousGST, ToyContinuousSigB
MODEL = ToyContinuous
MODEL_CODE = "TOY"
param = {"n": 5000, # Number of patches
"T": 100, # Patch size
"ip":0.01, # Initial proportions of mutants
"r":0.5, # Resident trait value
"mu":0, # Mutation rate
"b":20, # Benefits coefficient
"c":1, # Cost coefficient
"g":100, # Number of generations
"dz":0.01,
"replica": 25,
"time": time.asctime(),
"host": "|".join(platform.uname()),
"precision":0.01,
"invfitness_g":10,
"lk_R":1000,
# Specific to threshold
"T_range": [20,100],
"b_range": [3,4,5,6,7,8,9,10,15,20,25,30,35,40],
"kmax":10,
"thres_r":1,
#Specific to trajectories
"range_ip":[0.001,0.005,0.01,0.05,0.1],
"range_g":[10,50,100,200],
#NLC & GST & SIG
"chi": 4,
"alpha":0.75,
"k":.3,
"s":.1
}
def get_definition(model,param,pre="%",su=""):
s = "% MODEL: " + model.model_name+"\n %PARAMETERS:\n"
s += "\n".join(["{} {}={} {}".format(pre,k,v,su)for k,v in param.items()])
return s
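# Sketch of what get_definition produces (model_name is whatever attribute the
# model class exposes, as used above; the parameter shown is just one entry of
# the dict): with the defaults pre="%" and su="", a pair like "b": 20 becomes
# the line "% b=20 ", appended after the "% MODEL: ..." / "%PARAMETERS:" header.
# The string is later appended to the generated .eps files so that each figure
# carries its own settings.
# >>> get_definition(MODEL, {"b": 20}).splitlines()[-1]
# '% b=20 '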
if __name__ == "__main__":
DO = "ALL"
print sys.argv
if len(sys.argv) >= 2:
DO = sys.argv[1]
if len(sys.argv) >= 3:
par = sys.argv[2].split("AND")
#print par
for st in par:
k,v = st.split("=")
param[k] = eval(v)
print "{} set to {}".format(k,v)
if len(sys.argv) >= 4:
if sys.argv[3] == "NLC":
MODEL = ToyContinuousNLC
MODEL_CODE = sys.argv[3]
if sys.argv[3] == "GST":
MODEL = ToyContinuousGST
MODEL_CODE = sys.argv[3]
if sys.argv[3] == "SIG":
MODEL = ToyContinuousSigB
MODEL_CODE = sys.argv[3]
print("DO: {} with model {}".format(DO,MODEL_CODE))
param["m"] = param["r"] + param["dz"]
## Figure 2: Numerical PIP ##
if DO == "pip" or DO == "ALL":
print "{:-^80}".format(" PIP ")
pip_file = "pip{}_T{}_n{}_step{}_repl_{}_b{}_ip{}".format(MODEL_CODE, param["T"],
param["n"], param["precision"],
param["replica"], param["b"],
param["ip"])
print pip_file
if not os.path.exists(pip_file+".pkle"):
pip_data,pip_param = pip.mp_pip(MODEL,param.copy(),param["precision"])
with open(pip_file+".pkle","w") as fi:
pickle.dump((pip_data,pip_param),fi)
print "saved {}.pkle".format(pip_file)
else:
with open(pip_file+".pkle","r") as fi:
pip_data,pip_param = pickle.load(fi)
if not os.path.exists(pip_file+".eps"):
draw.pip(pip_data,False)
plt.savefig(pip_file+".eps")
with open(pip_file+".eps","a") as fi:
fi.write(get_definition(MODEL,pip_param))
print "saved {}.eps".format(pip_file)
## Figure 3: INVASION Heatmap ##
if DO == "heatmap" or DO == "ALL":
print "{:-^80}".format(" HEATMAP ")
heatmap_file = "heatmap_r_{}_m{}_repl{}".format(param["r"],param["m"],param["replica"])
if not os.path.exists(heatmap_file+".pkle"):
data,out_param = invasion.heatmap(MODEL,param.copy())
with open(heatmap_file+".pkle","w") as fi:
pickle.dump((data,out_param),fi)
else:
with open(heatmap_file+".pkle","r") as fi:
data,out_param = pickle.load(fi)
if not os.path.exists(heatmap_file+".eps"):
draw.heatmap(data,out_param,False)
plt.savefig(heatmap_file+".eps")
with open(heatmap_file+".eps","a") as fi:
fi.write(get_definition(MODEL,out_param))
## Figure 4: Sociality threshold ##
if DO == "threshold" or DO == "ALL":
print "{:-^80}".format(" SCORE THRESHOLD ")
threshold_file = "threshold_kmax{}_{}T_{}b_g{}_{}repl".format(param["kmax"],
len(param["T_range"]),
len(param["b_range"]),
param["g"],
param["replica"])
if not os.path.exists(threshold_file+".pkle"):
data,out_param = invasion.threshold(MODEL,param.copy())
with open(threshold_file+".pkle","w") as fi:
pickle.dump((data,out_param),fi)
else:
with open(threshold_file+".pkle","r") as fi:
data,out_param = pickle.load(fi)
if not os.path.exists(threshold_file+".eps"):
draw.threshold(data)
plt.savefig(threshold_file+".eps")
with open(threshold_file+".eps","a") as fi:
fi.write(get_definition(MODEL,out_param))
## Figure S1: Quelques trajectoires d'invasion (different ip) ##
if DO == "trajectoires":
print "{:-^80}".format(" Trajectories ")
traj_file = "trajectories_m{}_{}ip_{}g".format(param["m"],len(param["range_ip"]),len(param["range_g"]))
if not os.path.exists(traj_file+".pkle"):
data = [np.zeros((len(param["range_ip"]), len(param["range_g"]))),
np.zeros((len(param["range_ip"]), len(param["range_g"])))]
for j,g in enumerate(param["range_g"]):
for i,ip in enumerate(param["range_ip"]):
print i,j,data[0].shape,data[1].shape
param["g"] = g
param["ip"] = ip
data[0][i,j],data[1][i,j] = invasion.mp_invasion_fitness(MODEL,param)
with open(traj_file+".pkle","w") as fi:
del param["g"]
del param["ip"]
pickle.dump((data,param),fi)
else:
with open(traj_file+".pkle","r") as fi:
data,param = pickle.load(fi)
if not os.path.exists(traj_file+"eps"):
draw.trajectories(data,param)
plt.savefig(traj_file+".eps")
with open(traj_file+".eps","a") as fi:
fi.write(get_definition(MODEL,param))
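# Example invocation (argument values are illustrative; any "k=v" pairs joined
# by the literal string AND are eval'd into the param dict, and the optional
# third argument switches the model class):
#   python figures.py pip "b=8ANDT=50" NLC
# This runs only the PIP figure with b=8 and T=50 using ToyContinuousNLC, and
# caches results as .pkle files next to the generated .eps figures.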
|
gpl-3.0
|
geolovic/TProfiler
|
test/04_profile_from_river.py
|
1
|
3412
|
# -*- coding: utf-8 -*-
"""
José Vicente Pérez
Granada University (Spain)
June, 2017
Testing suite for profiler.py
Last modified: 29 October 2017
"""
import time
import profiler as p
import ogr
import matplotlib.pyplot as plt
import numpy as np
print("Tests for profiler.profiles_from_rivers()")
def test01():
"""
Test for profiles_from_rivers() function
Testing all rivers in rivers with tributaries
"""
inicio = time.time()
print("=" * 40)
print("Test 01 para profiles_from_rivers function")
print("Testing all rivers with tributaries")
print("Test in progress...")
# Test parameters
fac = "data/in/darro25fac.tif"
dem = "data/in/darro25.tif"
river_shapefile = "data/in/rios.shp"
id_field = "id_rio"
name_field = "name"
perfiles = p.profiles_from_rivers(fac, dem, river_shapefile, id_field=id_field, name_field=name_field)
draw_profiles(perfiles)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test02():
"""
Test for profiles_from_rivers() function
"""
inicio = time.time()
print("=" * 40)
print("Test 01 para profiles_from_rivers function")
print("Testing all rivers with tributaries, no id, no namefield")
print("Test in progress...")
# Test parameters
fac = "data/in/darro25fac.tif"
dem = "data/in/darro25.tif"
river_shapefile = "data/in/rios.shp"
# Check with id and name_field
perfiles = p.profiles_from_rivers(fac, dem, river_shapefile)
draw_profiles(perfiles)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test03():
"""
Test for profiles_from_rivers() function
"""
inicio = time.time()
print("=" * 40)
print("Test 01 para profiles_from_rivers function")
print("Testing all rivers without tributaries")
print("Test in progress...")
# Test parameters
fac = "data/in/darro25fac.tif"
dem = "data/in/darro25.tif"
river_shapefile = "data/in/rios.shp"
id_field = "id_rio"
name_field = "name"
# Get profiles
perfiles = p.profiles_from_rivers(fac, dem, river_shapefile, id_field, name_field, tributaries=False)
draw_profiles(perfiles)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def test04():
"""
Test for profiles_from_rivers() function
"""
inicio = time.time()
print("=" * 40)
print("Test 01 para profiles_from_rivers function")
print("Testing a river shapefile with disconected rivers")
print("Test in progress...")
# Test parameters
fac = "data/in/darro25fac.tif"
dem = "data/in/darro25.tif"
river_shapefile = "data/in/rios2.shp"
id_field = "id_rio"
name_field = "name"
# Get profiles
perfiles = p.profiles_from_rivers(fac, dem, river_shapefile, id_field, name_field, tributaries=True)
draw_profiles(perfiles)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("=" * 40)
def draw_profiles(perfiles):
fig = plt.figure()
ax = fig.add_subplot(111)
for perfil in perfiles:
chi = perfil.get_chi(False)
zi = perfil.get_z(False)
ax.plot(chi, zi, label=perfil.name)
ax.legend()
plt.show()
test01()
test02()
test03()
test04()
|
gpl-3.0
|
naritta/numpy
|
numpy/core/code_generators/ufunc_docstrings.py
|
51
|
90047
|
"""
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
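# How the registry is used: each add_newdoc call below stores its text under
# the key "<place>.<name>", and generate_umath.py later looks it up via get().
# >>> get('numpy.core.umath.add').lstrip().startswith('Add arguments element-wise.')
# True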
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.empty(1)
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the
inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
and that positive infinity is not equivalent to negative infinity; plain
infinity, however, is treated as positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if the
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5,  0. ],
       [ 0. ,  1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be numpy.ones_like, but a dedicated function has
since been written for consistency with the other *_like functions. It
is now only used internally in a limited fashion.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``, the result has the same sign as
the divisor `x2`. It is equivalent to the Python modulus operator
``x1 % x2`` and should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a
scalar if both `x1` and `x2` are scalars.
See Also
--------
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` positions. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.zeros(1)
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the
cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.zeros(1)
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)), np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.zeros(1)
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2, where it
behaves the same as ``/`` for integer arguments by default. That default
floor-division behavior of ``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`; if used by itself, it is
clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
| uhjish/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 |
# -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython output prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, composed of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
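As a small illustration (a sketch assuming the default prompt regexes), the
part "In [1]: x = 1" parses to::
    blocks = [(INPUT, (None, 'x = 1', ''))]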
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue over more than one line if there is
# a '\' line-continuation character or echoed text from a call
# such as 'print'. The input line can only be terminated by the
# end of the block or an output line, so we parse out the rest
# of the input line if it is multiline, as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except Exception:
pass
# default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
# based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @savefig pseudo decorator was used. Thus, it's
# possible that we could be here even if ipython_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_mplbackend was set to None but there was a
# call to the @savefig decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings; it is the unedited directive content.
This runs it line by line in the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were IPython code.
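For example (a sketch, assuming the default 'In [%d]:' input prompt), the
single line 'x = 1' comes back roughly as ['In [0]: x = 1', ''].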
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
# NOTE: this may break if there are multiple seen_doc tmp files;
# check the time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip the first example, which depends on a local file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
|
bsd-3-clause
|
idlead/scikit-learn
|
examples/svm/plot_svm_anova.py
|
12
|
2017
|
"""
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data to be in a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
liyu1990/sklearn
|
sklearn/datasets/tests/test_svmlight_format.py
|
228
|
11221
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
|
bsd-3-clause
|
bigdataelephants/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
15
|
7882
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import check_skip_travis
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
# FIXME: This test is unstable on Travis, see issue #3190 for more detail.
check_skip_travis()
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
saiwing-yeung/scikit-learn
|
benchmarks/bench_plot_randomized_svd.py
|
57
|
17557
|
"""
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd outputs). The spectral norm is always equal to the
largest singular value of a matrix; (3) justifies this choice. However, in
these experiments the Frobenius and spectral norms behave very similarly
in a qualitative sense, so we suggest running these benchmarks with
`enable_spectral_norm = False`, as the Frobenius norm is MUCH faster to
compute.
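Concretely, the reported discrepancy is (a sketch in NumPy notation, with X
the input matrix and U, s, V the randomized_svd outputs):
    norm(X - U @ np.diag(s) @ V) / norm(X)
where norm is either the spectral norm (2) or the Frobenius norm ('fro').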
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
if handle_missing_dataset(CIFAR_FOLDER) == "skip":
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
if l is not "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
if l is not "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
Compute the norm of A, which is typically the difference between the
original matrix and its reconstruction from the randomized SVD outputs.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
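# Typical use (a sketch mirroring bench_a / bench_c below): the relative error
# reported in the plots is
#     norm_diff(X - A, norm='fro') / norm_diff(X, norm='fro', msg=False)
# where A = U.dot(np.diag(s).dot(V)) is the low-rank reconstruction.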
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False) ** 2  # sum of squared block norms
return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
plot_power_iter_vs_s(power_list, all_spectral, title)
title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
|
bsd-3-clause
|
ebellm/ztf_summerschool_2015
|
bootcamp_software/test_python_packages.py
|
1
|
5597
|
from __future__ import print_function
import subprocess
import sys
# test for Anaconda
pypath = subprocess.check_output(["which", "python"])
if b"anaconda" not in pypath.split(b"/"):
print("WARNING: You are not running the anaconda distribution of Python!")
print("\t If this is intentional, ignore this message.")
print("\t If you attempted to install anaconda, check your PATH...")
print("\t you may need to prepend the ~/anaconda/bin/ directory.")
# test for necessary libraries
# NumPy
try:
import numpy
numpy_version = numpy.__version__
if numpy_version[0:4] != '1.11':
print("minor warning: you are running a version of numpy < 1.11")
print("\t to update in anaconda, use the command line:")
print("\t $> conda update numpy")
except ImportError:
print("WARNING: You do not have the numpy package installed")
print("\t your Python is bare bones, please install anaconda")
# astropy
try:
import astropy
astropy_version = astropy.__version__
if astropy_version[0:3] != '1.2':
print("minor warning: you are running a version of astropy < 1.2")
print("\t to update in anaconda, use the command line:")
print("\t $> conda update astropy")
except ImportError:
print("WARNING: You do not have the astropy package installed")
print("\t to install in anaconda, use the command line:")
print("\t $> conda install astropy")
print("\t if you aren't using anaconda consider using pip")
# glob
try:
import glob
except ImportError:
print("WARNING: You do not have the glob package installed")
print("\t your Python is bare bones, please install anaconda")
# matplotlib
try:
import matplotlib
matplotlib_version = matplotlib.__version__
if matplotlib_version[0:3] != '1.5':
print("minor warning: you are running a version of matplotlib < 1.5")
print("\t to update in anaconda, use the command line:")
print("\t $> conda update matplotlib")
except ImportError:
print("WARNING: You do not have the matplotlib package installed")
print("\t your Python is bare bones, please install anaconda")
# shelve
try:
import shelve
except ImportError:
print("WARNING: You do not have the shelve package installed")
print("\t your Python is bare bones, please install anaconda")
# pickle
try:
import pickle
except ImportError:
print("WARNING: You do not have the pickle package installed")
print("\t your Python is bare bones, please install anaconda")
# time
try:
import time
except ImportError:
print("WARNING: You do not have the time package installed")
print("\t your Python is bare bones, please install anaconda")
# astroML
try:
import astroML
astroml_version = astroML.__version__
if astroml_version[0:3] != '0.3':
print("minor warning: you are running a version of astroML < 0.3")
print("\t consider upgrading")
except ImportError:
print("WARNING: You do not have the astroML package installed")
print("\t to install in anaconda, use the command line:")
print("\t $> conda install --channel https://conda.binstar.org/astropy astroML")
print("\t if you aren't using anaconda consider using pip:")
print("\t $> pip install astroML")
print("\t ALSO! Speed up your code, by running this (all Python):")
print("\t $> pip install astroML_addons")
# gatspy
try:
import gatspy
gatspy_version = gatspy.__version__
if gatspy_version[0:3] != '0.3':
print("minor warning: you are running a version of gatspy < 0.3")
print("\t consider upgrading")
except ImportError:
print("WARNING: You do not have the gatspy package installed")
print("\t to install, use the command line:")
print("\t $> pip install gatspy")
# astroquery
try:
import astroquery
astroquery_version = astroquery.__version__
if astroquery_version[0:3] != '0.3':
print("minor warning: you are running a version of astroquery < 0.3")
print("\t consider upgrading")
except ImportError:
print("WARNING: You do not have the astroquery package installed")
print("\t to install astroquery use pip on the command line:")
print("\t $> pip install astroquery")
# sklearn
try:
import sklearn
sklearn_version = sklearn.__version__
if sklearn_version[0:4] != '0.17':
print("minor warning: you are running a version of scikit-learn < 0.16")
print("\t to update in anaconda, use the command line:")
print("\t $> conda update scikit-learn")
except ImportError:
print("WARNING: You do not have the scikit-learn package installed")
print("\t to install in anaconda, use the command line:")
print("\t $> conda install scikit-learn")
print("\t if you aren't using anaconda consider using pip:")
print("\t $> pip install scikit-learn")
# FATS (only compatible with python2 for now)
if sys.version[0] == '2':
try:
import FATS
except ImportError:
print("WARNING: You do not have the FATS package installed")
print("\t to install FATS use the pip on the command line:")
print("\t $> pip install FATS")
# IPython
try:
import IPython
ipy_version = IPython.__version__
if ipy_version[0] != '4':
print("minor warning: you are running a version of ipython < 4.0.0")
print("\t to update in anaconda, use the command line:")
print("\t $> conda update ipython")
except ImportError:
print("WARNING: You do not have IPython installed")
print("\t your Python is bare bones, please install anaconda")
|
bsd-3-clause
|
ablifedev/ABLIRC
|
ABLIRC/bin/Clip-Seq/ABLIFE/peak_overlap_between_ablife_and_piranha.py
|
1
|
12831
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#
#
#####################################################################################
"""
Program description:
1. Pick out the peaks of the experimental group that do not overlap with the control group.
Program design:
Use the GenomicArrayOfSets class from the HTSeq module to record the control-group peaks, then
iterate over the experimental-group peaks; if the interval of a peak already contains part of a
control-group peak, it is skipped.
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
from ablib.utils.tools import *
import gffutils
import HTSeq
import numpy
import multiprocessing
from matplotlib import pyplot
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option(
'-e', '--exp', dest='exp', action='store',
        type='string', help='ablife peaks file; the first three columns must be the peak chromosome, start and end')
p.add_option(
'-c', '--ctrl', dest='ctrl', action='store',
        type='string', help='piranha peaks file; the first three columns must be the peak chromosome, start and end')
p.add_option(
'-o', '--outfile', dest='outfile', default='peak_overlap_between_ablife_and_piranha.txt', action='store',
type='string', help='peak_overlap_between_ablife_and_piranha')
p.add_option(
'-n', '--samplename', dest='samplename', default='', action='store',
type='string', help='sample name,default is ""')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option(
'-O', '--outDir', dest='outDir', default='./', action='store',
type='string', help='output directory', metavar="DIR")
group.add_option(
'-L', '--logDir', dest='logDir', default='', action='store',
type='string', help='log dir ,default is same as outDir')
group.add_option(
'-P', '--logPrefix', dest='logPrefix', default='', action='store',
type='string', help='log file prefix')
group.add_option(
'-E', '--email', dest='email', default='none', action='store',
type='string', help='email address, if you want get a email when this job is finished,default is no email',
metavar="EMAIL")
group.add_option(
'-Q', '--quiet', dest='quiet', default=False, action='store_true',
help='do not print messages to stdout')
group.add_option(
'-K', '--keepTemp', dest='keepTemp', default=False, action='store_true',
help='keep temp dir')
group.add_option(
'-T', '--test', dest='isTest', default=False, action='store_true',
help='run this program for test')
p.add_option_group(group)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sample = ""
if opt.samplename != "":
sample = opt.samplename + '_'
if opt.outfile == 'peak_overlap_between_ablife_and_piranha.txt':
opt.outfile = sample + opt.outfile
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/'  # absolute temp path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M',
filename=logFilename,
filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
logging.debug("Main procedure start...")
gas = HTSeq.GenomicArrayOfSets( "auto", stranded=True )
# #chr start end name tags strand pvalue
# chr1 2392860 2392880 X 8 + 0.000238833
title2 = getTitle(opt.ctrl)
piranha_total_peaknum = 0
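    # Load the control (piranha) peaks into the GenomicArrayOfSets; the start
    # coordinate is shifted by one, presumably converting 1-based input
    # positions to HTSeq's 0-based, half-open intervals.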
for eachLine in open(opt.ctrl,'r'):
line=eachLine.strip().split("\t")
if eachLine.startswith("#"):
continue
peak_iv = HTSeq.GenomicInterval(line[0], int(line[1]) - 1, int(line[2]), line[5])
gas[peak_iv]+=eachLine.strip()
piranha_total_peaknum += 1
w = open(opt.outfile,"w")
title1 = getTitle(opt.exp)
w.writelines("ablife"+title1+"\tpiranha"+title2+"\n")
ablife_total_peaknum = 0
for eachLine in open(opt.exp,'r'):
line=eachLine.strip().split("\t")
if eachLine.startswith("#"):
continue
ablife_total_peaknum += 1
peak_iv = HTSeq.GenomicInterval(line[0], int(line[1]) - 1, int(line[2]), line[5])
peak_len = int(line[2]) - int(line[1]) + 1
flag = 0
overlap_length = 0
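        # steps() yields maximal sub-intervals of the experimental peak with a
        # constant set of overlapping control peaks; a non-empty set means this
        # stretch overlaps at least one piranha peak.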
for iv, fs in gas[peak_iv].steps():
if len(fs) >= 1:
flag = 1
overlap_length += iv.length
for p in fs:
w.writelines(eachLine.strip()+"\t"+str(iv.length)+"\t"+p+'\n')
# if flag == 0:
# print(eachLine.strip()+"\t"+str(peak_len))
# else:
# print(eachLine.strip()+str(overlap_length)+'\n')
w.close()
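    # Count distinct overlapping peaks on each side from the output table,
    # subtracting 1 for the header line (this assumes column 4 identifies an
    # ablife peak and columns 11-13 hold the matched piranha peak coordinates).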
tmp = os.popen('cut -f 11,12,13 '+opt.outfile+' | sort|uniq|wc -l').readlines()
piranha_peaknum = int(tmp[0].strip()) - 1
tmp = os.popen('cut -f 4 '+opt.outfile+' | sort|uniq|wc -l').readlines()
ablife_peaknum = int(tmp[0].strip()) - 1
ablife_overlap_percent = round(100 * float(ablife_peaknum) / ablife_total_peaknum, 2)
piranha_overlap_percent = round(100 * float(piranha_peaknum) / piranha_total_peaknum, 2)
print("ablife total peaks:"+str(ablife_total_peaknum))
print("ablife overlap peaks:"+str(ablife_peaknum)+ "(" + str(ablife_overlap_percent) + "%)" )
print("piranha total peaks:"+str(piranha_total_peaknum))
print("piranha overlap peaks:"+str(piranha_peaknum)+ "(" + str(piranha_overlap_percent) + "%)" )
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# if not opt.keepTemp:
# os.system('rm -rf ' + tempPath)
# logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def countProgram(programName, startT, runT, isTest):
countProgramFile = open('/users/ablife/ablifepy/countProgram.txt', 'a')
countProgramFile.write(
programName + '\t' + str(os.getlogin()) + '\t' + str(startT) + '\t' + str(runT) + 's\t' + isTest + '\n')
countProgramFile.close()
testStr = 'P'
if opt.isTest:
testStr = 'T'
countProgram(sys.argv[0], startTime, runningTime, testStr)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
|
mit
|
johndpope/tensorflow
|
tensorflow/contrib/learn/python/learn/dataframe/dataframe.py
|
85
|
4704
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
      **kwargs: assignments of the form key=value where key is a string
        and value is an `inflow.Series`, a zero-input `inflow.Transform`,
        or None (None deletes the column).
    Raises:
      TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, zero-input
        `inflow.Transform`, or None.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
|
apache-2.0
|
untom/scikit-learn
|
examples/classification/plot_lda.py
|
164
|
2224
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
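# For each feature count, average the test accuracy of shrinkage LDA and
# standard LDA over n_averages independent train/test draws.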
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
cshallue/models
|
research/differential_privacy/pate/ICLR2018/plot_partition.py
|
1
|
13619
|
"""Produces two plots. One compares aggregators and their analyses. The other
illustrates sources of privacy loss for Confident-GNMax.
A script in support of the paper "Scalable Private Learning with PATE" by
Nicolas Papernot, Shuang Song, Ilya Mironov, Ananth Raghunathan, Kunal Talwar,
Ulfar Erlingsson (https://arxiv.org/abs/1802.08908).
The input is a file containing a numpy array of votes, one query per row, one
class per column. Ex:
43, 1821, ..., 3
31, 16, ..., 0
...
0, 86, ..., 438
The output is written to a specified directory and consists of two files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import pickle
import sys
sys.path.append('..') # Main modules reside in the parent directory.
from absl import app
from absl import flags
from collections import namedtuple
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import core as pate
import smooth_sensitivity as pate_ss
plt.style.use('ggplot')
FLAGS = flags.FLAGS
flags.DEFINE_boolean('cache', False,
'Read results of privacy analysis from cache.')
flags.DEFINE_string('counts_file', None, 'Counts file.')
flags.DEFINE_string('figures_dir', '', 'Path where figures are written to.')
flags.DEFINE_float('threshold', None, 'Threshold for step 1 (selection).')
flags.DEFINE_float('sigma1', None, 'Sigma for step 1 (selection).')
flags.DEFINE_float('sigma2', None, 'Sigma for step 2 (argmax).')
flags.DEFINE_integer('queries', None, 'Number of queries made by the student.')
flags.DEFINE_float('delta', 1e-8, 'Target delta.')
flags.mark_flag_as_required('counts_file')
flags.mark_flag_as_required('threshold')
flags.mark_flag_as_required('sigma1')
flags.mark_flag_as_required('sigma2')
Partition = namedtuple('Partition', ['step1', 'step2', 'ss', 'delta'])
def analyze_gnmax_conf_data_ind(votes, threshold, sigma1, sigma2, delta):
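  """Data-independent RDP analysis of (Confident-)GNMax.

  For each query, accumulates data-independent RDP of the noisy-threshold
  selection step (when threshold and sigma1 are given) and of the Gaussian
  argmax step, then converts the total to eps at the target delta. Returns the
  per-query cumulative eps and the expected number of answered queries.
  """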
orders = np.logspace(np.log10(1.5), np.log10(500), num=100)
n = votes.shape[0]
rdp_total = np.zeros(len(orders))
answered_total = 0
answered = np.zeros(n)
eps_cum = np.full(n, None, dtype=float)
for i in range(n):
v = votes[i,]
if threshold is not None and sigma1 is not None:
q_step1 = np.exp(pate.compute_logpr_answered(threshold, sigma1, v))
rdp_total += pate.rdp_data_independent_gaussian(sigma1, orders)
else:
q_step1 = 1. # always answer
answered_total += q_step1
answered[i] = answered_total
rdp_total += q_step1 * pate.rdp_data_independent_gaussian(sigma2, orders)
eps_cum[i], order_opt = pate.compute_eps_from_delta(orders, rdp_total,
delta)
if i > 0 and (i + 1) % 1000 == 0:
print('queries = {}, E[answered] = {:.2f}, E[eps] = {:.3f} '
'at order = {:.2f}.'.format(
i + 1,
answered[i],
eps_cum[i],
order_opt))
sys.stdout.flush()
return eps_cum, answered
def analyze_gnmax_conf_data_dep(votes, threshold, sigma1, sigma2, delta):
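  """Data-dependent (smooth-sensitivity) analysis of (Confident-)GNMax.

  For each query, accumulates RDP of the threshold step, the GNMax step and the
  smooth-sensitivity noise, and partitions the privacy cost at the optimal
  order into these contributions plus the delta term. Returns
  (eps_partitioned, answered, ss_std_opt, order_opt).
  """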
# Short list of orders.
# orders = np.round(np.logspace(np.log10(20), np.log10(200), num=20))
# Long list of orders.
orders = np.concatenate((np.arange(20, 40, .2),
np.arange(40, 75, .5),
np.logspace(np.log10(75), np.log10(200), num=20)))
n = votes.shape[0]
num_classes = votes.shape[1]
num_teachers = int(sum(votes[0,]))
if threshold is not None and sigma1 is not None:
is_data_ind_step1 = pate.is_data_independent_always_opt_gaussian(
num_teachers, num_classes, sigma1, orders)
else:
is_data_ind_step1 = [True] * len(orders)
is_data_ind_step2 = pate.is_data_independent_always_opt_gaussian(
num_teachers, num_classes, sigma2, orders)
eps_partitioned = np.full(n, None, dtype=Partition)
order_opt = np.full(n, None, dtype=float)
ss_std_opt = np.full(n, None, dtype=float)
answered = np.zeros(n)
rdp_step1_total = np.zeros(len(orders))
rdp_step2_total = np.zeros(len(orders))
ls_total = np.zeros((len(orders), num_teachers))
answered_total = 0
for i in range(n):
v = votes[i,]
if threshold is not None and sigma1 is not None:
logq_step1 = pate.compute_logpr_answered(threshold, sigma1, v)
rdp_step1_total += pate.compute_rdp_threshold(logq_step1, sigma1, orders)
else:
logq_step1 = 0. # always answer
pr_answered = np.exp(logq_step1)
logq_step2 = pate.compute_logq_gaussian(v, sigma2)
rdp_step2_total += pr_answered * pate.rdp_gaussian(logq_step2, sigma2,
orders)
answered_total += pr_answered
rdp_ss = np.zeros(len(orders))
ss_std = np.zeros(len(orders))
for j, order in enumerate(orders):
if not is_data_ind_step1[j]:
ls_step1 = pate_ss.compute_local_sensitivity_bounds_threshold(v,
num_teachers, threshold, sigma1, order)
else:
ls_step1 = np.full(num_teachers, 0, dtype=float)
if not is_data_ind_step2[j]:
ls_step2 = pate_ss.compute_local_sensitivity_bounds_gnmax(
v, num_teachers, sigma2, order)
else:
ls_step2 = np.full(num_teachers, 0, dtype=float)
ls_total[j,] += ls_step1 + pr_answered * ls_step2
beta_ss = .49 / order
ss = pate_ss.compute_discounted_max(beta_ss, ls_total[j,])
sigma_ss = ((order * math.exp(2 * beta_ss)) / ss) ** (1 / 3)
rdp_ss[j] = pate_ss.compute_rdp_of_smooth_sensitivity_gaussian(
beta_ss, sigma_ss, order)
ss_std[j] = ss * sigma_ss
rdp_total = rdp_step1_total + rdp_step2_total + rdp_ss
answered[i] = answered_total
_, order_opt[i] = pate.compute_eps_from_delta(orders, rdp_total, delta)
order_idx = np.searchsorted(orders, order_opt[i])
# Since optimal orders are always non-increasing, shrink orders array
# and all cumulative arrays to speed up computation.
if order_idx < len(orders):
orders = orders[:order_idx + 1]
rdp_step1_total = rdp_step1_total[:order_idx + 1]
rdp_step2_total = rdp_step2_total[:order_idx + 1]
eps_partitioned[i] = Partition(step1=rdp_step1_total[order_idx],
step2=rdp_step2_total[order_idx],
ss=rdp_ss[order_idx],
delta=-math.log(delta) / (order_opt[i] - 1))
ss_std_opt[i] = ss_std[order_idx]
if i > 0 and (i + 1) % 1 == 0:
print('queries = {}, E[answered] = {:.2f}, E[eps] = {:.3f} +/- {:.3f} '
'at order = {:.2f}. Contributions: delta = {:.3f}, step1 = {:.3f}, '
'step2 = {:.3f}, ss = {:.3f}'.format(
i + 1,
answered[i],
sum(eps_partitioned[i]),
ss_std_opt[i],
order_opt[i],
eps_partitioned[i].delta,
eps_partitioned[i].step1,
eps_partitioned[i].step2,
eps_partitioned[i].ss))
sys.stdout.flush()
return eps_partitioned, answered, ss_std_opt, order_opt
def plot_comparison(figures_dir, simple_ind, conf_ind, simple_dep, conf_dep):
"""Plots variants of GNMax algorithm and their analyses.
"""
def pivot(x_axis, eps, answered):
    y = np.full(len(x_axis), None, dtype=float)  # eps as a function of queries answered
for i, x in enumerate(x_axis):
idx = np.searchsorted(answered, x)
if idx < len(eps):
y[i] = eps[idx]
return y
def pivot_dep(x_axis, data_dep):
eps_partitioned, answered, _, _ = data_dep
eps = [sum(p) for p in eps_partitioned] # Flatten eps
return pivot(x_axis, eps, answered)
xlim = 10000
x_axis = range(0, xlim, 10)
y_simple_ind = pivot(x_axis, *simple_ind)
y_conf_ind = pivot(x_axis, *conf_ind)
y_simple_dep = pivot_dep(x_axis, simple_dep)
y_conf_dep = pivot_dep(x_axis, conf_dep)
# plt.close('all')
fig, ax = plt.subplots()
fig.set_figheight(4.5)
fig.set_figwidth(4.7)
ax.plot(x_axis, y_simple_ind, ls='--', color='r', lw=3, label=r'Simple GNMax, data-ind analysis')
ax.plot(x_axis, y_conf_ind, ls='--', color='b', lw=3, label=r'Confident GNMax, data-ind analysis')
ax.plot(x_axis, y_simple_dep, ls='-', color='r', lw=3, label=r'Simple GNMax, data-dep analysis')
ax.plot(x_axis, y_conf_dep, ls='-', color='b', lw=3, label=r'Confident GNMax, data-dep analysis')
plt.xticks(np.arange(0, xlim + 1000, 2000))
plt.xlim([0, xlim])
plt.ylim(bottom=0)
plt.legend(fontsize=16)
ax.set_xlabel('Number of queries answered', fontsize=16)
ax.set_ylabel(r'Privacy cost $\varepsilon$ at $\delta=10^{-8}$', fontsize=16)
ax.tick_params(labelsize=14)
plot_filename = os.path.join(figures_dir, 'comparison.pdf')
print('Saving the graph to ' + plot_filename)
fig.savefig(plot_filename, bbox_inches='tight')
plt.show()
def plot_partition(figures_dir, gnmax_conf, print_order):
"""Plots an expert version of the privacy-per-answered-query graph.
Args:
    figures_dir: Name of the directory where the plot is saved.
    gnmax_conf: Tuple (eps_partitioned, answered, ss_std_opt, order_opt) as
      returned by analyze_gnmax_conf_data_dep: the partitioned cumulative
      privacy cost, the cumulative number of queries answered, the
      smooth-sensitivity noise std, and the list of optimal orders.
    print_order: Whether to also plot the optimal order on a secondary axis.
"""
eps_partitioned, answered, ss_std_opt, order_opt = gnmax_conf
xlim = 10000
x = range(0, int(xlim), 10)
lenx = len(x)
y0 = np.full(lenx, np.nan, dtype=float) # delta
y1 = np.full(lenx, np.nan, dtype=float) # delta + step1
y2 = np.full(lenx, np.nan, dtype=float) # delta + step1 + step2
y3 = np.full(lenx, np.nan, dtype=float) # delta + step1 + step2 + ss
noise_std = np.full(lenx, np.nan, dtype=float)
y_right = np.full(lenx, np.nan, dtype=float)
for i in range(lenx):
idx = np.searchsorted(answered, x[i])
if idx < len(eps_partitioned):
y0[i] = eps_partitioned[idx].delta
y1[i] = y0[i] + eps_partitioned[idx].step1
y2[i] = y1[i] + eps_partitioned[idx].step2
y3[i] = y2[i] + eps_partitioned[idx].ss
noise_std[i] = ss_std_opt[idx]
y_right[i] = order_opt[idx]
# plt.close('all')
fig, ax = plt.subplots()
fig.set_figheight(4.5)
fig.set_figwidth(4.7)
fig.patch.set_alpha(0)
l1 = ax.plot(
x, y3, color='b', ls='-', label=r'Total privacy cost', linewidth=1).pop()
for y in (y0, y1, y2):
ax.plot(x, y, color='b', ls='-', label=r'_nolegend_', alpha=.5, linewidth=1)
ax.fill_between(x, [0] * lenx, y0.tolist(), facecolor='b', alpha=.5)
ax.fill_between(x, y0.tolist(), y1.tolist(), facecolor='b', alpha=.4)
ax.fill_between(x, y1.tolist(), y2.tolist(), facecolor='b', alpha=.3)
ax.fill_between(x, y2.tolist(), y3.tolist(), facecolor='b', alpha=.2)
ax.fill_between(x, (y3 - noise_std).tolist(), (y3 + noise_std).tolist(),
facecolor='r', alpha=.5)
plt.xticks(np.arange(0, xlim + 1000, 2000))
plt.xlim([0, xlim])
ax.set_ylim([0, 3.])
ax.set_xlabel('Number of queries answered', fontsize=16)
ax.set_ylabel(r'Privacy cost $\varepsilon$ at $\delta=10^{-8}$', fontsize=16)
# Merging legends.
if print_order:
ax2 = ax.twinx()
l2 = ax2.plot(
x, y_right, 'r', ls='-', label=r'Optimal order', linewidth=5,
alpha=.5).pop()
ax2.grid(False)
# ax2.set_ylabel(r'Optimal Renyi order', fontsize=16)
ax2.set_ylim([0, 200.])
# ax.legend((l1, l2), (l1.get_label(), l2.get_label()), loc=0, fontsize=13)
ax.tick_params(labelsize=14)
plot_filename = os.path.join(figures_dir, 'partition.pdf')
print('Saving the graph to ' + plot_filename)
fig.savefig(plot_filename, bbox_inches='tight', dpi=800)
plt.show()
def run_all_analyses(votes, threshold, sigma1, sigma2, delta):
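  """Runs all four analyses: {Simple, Confident} GNMax x {data-ind, data-dep}."""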
simple_ind = analyze_gnmax_conf_data_ind(votes, None, None, sigma2,
delta)
conf_ind = analyze_gnmax_conf_data_ind(votes, threshold, sigma1, sigma2,
delta)
simple_dep = analyze_gnmax_conf_data_dep(votes, None, None, sigma2,
delta)
conf_dep = analyze_gnmax_conf_data_dep(votes, threshold, sigma1, sigma2,
delta)
return (simple_ind, conf_ind, simple_dep, conf_dep)
def run_or_load_all_analyses():
temp_filename = os.path.expanduser('~/tmp/partition_cached.pkl')
if FLAGS.cache and os.path.isfile(temp_filename):
print('Reading from cache ' + temp_filename)
with open(temp_filename, 'rb') as f:
all_analyses = pickle.load(f)
else:
fin_name = os.path.expanduser(FLAGS.counts_file)
print('Reading raw votes from ' + fin_name)
sys.stdout.flush()
votes = np.load(fin_name)
if FLAGS.queries is not None:
if votes.shape[0] < FLAGS.queries:
raise ValueError('Expect {} rows, got {} in {}'.format(
FLAGS.queries, votes.shape[0], fin_name))
# Truncate the votes matrix to the number of queries made.
votes = votes[:FLAGS.queries, ]
all_analyses = run_all_analyses(votes, FLAGS.threshold, FLAGS.sigma1,
FLAGS.sigma2, FLAGS.delta)
print('Writing to cache ' + temp_filename)
with open(temp_filename, 'wb') as f:
pickle.dump(all_analyses, f)
return all_analyses
def main(argv):
del argv # Unused.
simple_ind, conf_ind, simple_dep, conf_dep = run_or_load_all_analyses()
figures_dir = os.path.expanduser(FLAGS.figures_dir)
plot_comparison(figures_dir, simple_ind, conf_ind, simple_dep, conf_dep)
plot_partition(figures_dir, conf_dep, True)
plt.close('all')
if __name__ == '__main__':
app.run(main)
|
apache-2.0
|
toobaz/pandas
|
pandas/io/json/_json.py
|
1
|
35551
|
from io import StringIO
from itertools import islice
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import ensure_str, is_period_dtype
from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
from pandas.core.reshape.concat import concat
from pandas.io.common import (
BaseIterator,
_get_handle,
_infer_compression,
_stringify_path,
get_filepath_or_buffer,
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import _validate_integer
from ._normalize import convert_to_line_delimits
from ._table_schema import build_table_schema, parse_table_schema
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = "0.20.0"
# interface to/from
def to_json(
path_or_buf,
obj,
orient=None,
date_format="epoch",
double_precision=10,
force_ascii=True,
date_unit="ms",
default_handler=None,
lines=False,
compression="infer",
index=True,
):
if not index and orient not in ["split", "table"]:
raise ValueError(
"'index=False' is only valid when 'orient' is " "'split' or 'table'"
)
path_or_buf = _stringify_path(path_or_buf)
if lines and orient != "records":
raise ValueError("'lines' keyword only valid when 'orient' is records")
if orient == "table" and isinstance(obj, Series):
obj = obj.to_frame(name=obj.name or "values")
if orient == "table" and isinstance(obj, DataFrame):
writer = JSONTableWriter
elif isinstance(obj, Series):
writer = SeriesWriter
elif isinstance(obj, DataFrame):
writer = FrameWriter
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
s = writer(
obj,
orient=orient,
date_format=date_format,
double_precision=double_precision,
ensure_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
index=index,
).write()
if lines:
s = convert_to_line_delimits(s)
if isinstance(path_or_buf, str):
fh, handles = _get_handle(path_or_buf, "w", compression=compression)
try:
fh.write(s)
finally:
fh.close()
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
class Writer:
def __init__(
self,
obj,
orient,
date_format,
double_precision,
ensure_ascii,
date_unit,
index,
default_handler=None,
):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.index = index
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return self._write(
self.obj,
self.orient,
self.double_precision,
self.ensure_ascii,
self.date_unit,
self.date_format == "iso",
self.default_handler,
)
def _write(
self,
obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
):
return dumps(
obj,
orient=orient,
double_precision=double_precision,
ensure_ascii=ensure_ascii,
date_unit=date_unit,
iso_dates=iso_dates,
default_handler=default_handler,
)
class SeriesWriter(Writer):
_default_orient = "index"
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == "index":
raise ValueError(
"Series index must be unique for orient="
"'{orient}'".format(orient=self.orient)
)
def _write(
self,
obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
):
if not self.index and orient == "split":
obj = {"name": obj.name, "data": obj.values}
return super()._write(
obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
)
class FrameWriter(Writer):
_default_orient = "columns"
def _format_axes(self):
"""
Try to format axes if they are datelike.
"""
if not self.obj.index.is_unique and self.orient in ("index", "columns"):
raise ValueError(
"DataFrame index must be unique for orient="
"'{orient}'.".format(orient=self.orient)
)
if not self.obj.columns.is_unique and self.orient in (
"index",
"columns",
"records",
):
raise ValueError(
"DataFrame columns must be unique for orient="
"'{orient}'.".format(orient=self.orient)
)
def _write(
self,
obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
):
if not self.index and orient == "split":
obj = obj.to_dict(orient="split")
del obj["index"]
return super()._write(
obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
)
class JSONTableWriter(FrameWriter):
_default_orient = "records"
def __init__(
self,
obj,
orient,
date_format,
double_precision,
ensure_ascii,
date_unit,
index,
default_handler=None,
):
"""
        Adds a `schema` attribute with the Table Schema, resets
        the index (can't do in caller, because the schema inference needs
        to know what the index is), forces orient to 'records', and forces
        date_format to 'iso'.
"""
super().__init__(
obj,
orient,
date_format,
double_precision,
ensure_ascii,
date_unit,
index,
default_handler=default_handler,
)
if date_format != "iso":
msg = (
"Trying to write with `orient='table'` and "
"`date_format='{fmt}'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`".format(fmt=date_format)
)
raise ValueError(msg)
self.schema = build_table_schema(obj, index=self.index)
# NotImplemented on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
raise NotImplementedError("orient='table' is not supported for MultiIndex")
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if (
(obj.ndim == 1)
and (obj.name in set(obj.index.names))
or len(obj.columns & obj.index.names)
):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=["timedelta"]).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
# exclude index from obj if index=False
if not self.index:
self.obj = obj.reset_index(drop=True)
else:
self.obj = obj.reset_index(drop=False)
self.date_format = "iso"
self.orient = "records"
self.index = index
def _write(
self,
obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
):
data = super()._write(
obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
)
serialized = '{{"schema": {schema}, "data": {data}}}'.format(
schema=dumps(self.schema), data=data
)
return serialized
def read_json(
path_or_buf=None,
orient=None,
typ="frame",
dtype=None,
convert_axes=None,
convert_dates=True,
keep_default_dates=True,
numpy=False,
precise_float=False,
date_unit=None,
encoding=None,
lines=False,
chunksize=None,
compression="infer",
):
"""
Convert a JSON string to pandas object.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.json``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values', 'table'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
.. versionadded:: 0.23.0
'table' as an allowed value for the ``orient`` argument
typ : {'frame', 'series'}, default 'frame'
The type of object to recover.
dtype : bool or dict, default None
If True, infer dtypes; if a dict of column to dtype, then use those;
if False, then don't infer dtypes at all, applies only to the data.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_axes : bool, default None
Try to convert the axes to the proper dtypes.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_dates : bool or list of str, default True
List of columns to parse for dates. If True, then try to parse
datelike columns. A column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``.
keep_default_dates : bool, default True
If parsing dates, then parse the default datelike columns.
numpy : bool, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : bool, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality.
date_unit : str, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
lines : bool, default False
Read the file as a json object per line.
chunksize : int, optional
Return JsonReader object for iteration.
See the `line-delimited json docs
<http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_
for more information on ``chunksize``.
This can only be passed if `lines=True`.
If this is None, the file will be read into memory all at once.
.. versionadded:: 0.21.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buf is a string ending in
'.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.21.0
Returns
-------
Series or DataFrame
The type returned depends on the value of `typ`.
See Also
--------
DataFrame.to_json : Convert a DataFrame to a JSON string.
Series.to_json : Convert a Series to a JSON string.
Notes
-----
Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
:class:`Index` name of `index` gets written with :func:`to_json`, the
subsequent read operation will incorrectly set the :class:`Index` name to
``None``. This is because `index` is also used by :func:`DataFrame.to_json`
to denote a missing :class:`Index` name, and the subsequent
:func:`read_json` operation cannot distinguish between the two. The same
limitation is encountered with a :class:`MultiIndex` and any names
beginning with ``'level_'``.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
if orient == "table" and dtype:
raise ValueError("cannot pass both dtype and orient='table'")
if orient == "table" and convert_axes:
raise ValueError("cannot pass both convert_axes and orient='table'")
if dtype is None and orient != "table":
dtype = True
if convert_axes is None and orient != "table":
convert_axes = True
compression = _infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression
)
json_reader = JsonReader(
filepath_or_buffer,
orient=orient,
typ=typ,
dtype=dtype,
convert_axes=convert_axes,
convert_dates=convert_dates,
keep_default_dates=keep_default_dates,
numpy=numpy,
precise_float=precise_float,
date_unit=date_unit,
encoding=encoding,
lines=lines,
chunksize=chunksize,
compression=compression,
)
if chunksize:
return json_reader
result = json_reader.read()
if should_close:
try:
filepath_or_buffer.close()
except: # noqa: flake8
pass
return result
class JsonReader(BaseIterator):
"""
JsonReader provides an interface for reading in a JSON file.
If initialized with ``lines=True`` and ``chunksize``, can be iterated over
``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
whole document.
"""
def __init__(
self,
filepath_or_buffer,
orient,
typ,
dtype,
convert_axes,
convert_dates,
keep_default_dates,
numpy,
precise_float,
date_unit,
encoding,
lines,
chunksize,
compression,
):
self.path_or_buf = filepath_or_buffer
self.orient = orient
self.typ = typ
self.dtype = dtype
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.keep_default_dates = keep_default_dates
self.numpy = numpy
self.precise_float = precise_float
self.date_unit = date_unit
self.encoding = encoding
self.compression = compression
self.lines = lines
self.chunksize = chunksize
self.nrows_seen = 0
self.should_close = False
if self.chunksize is not None:
self.chunksize = _validate_integer("chunksize", self.chunksize, 1)
if not self.lines:
raise ValueError("chunksize can only be passed if lines=True")
data = self._get_data_from_filepath(filepath_or_buffer)
self.data = self._preprocess_data(data)
def _preprocess_data(self, data):
"""
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
"""
if hasattr(data, "read") and not self.chunksize:
data = data.read()
if not hasattr(data, "read") and self.chunksize:
data = StringIO(data)
return data
def _get_data_from_filepath(self, filepath_or_buffer):
"""
The function read_json accepts three input types:
1. filepath (string-like)
2. file-like object (e.g. open file object, StringIO)
3. JSON string
This method turns (1) into (2) to simplify the rest of the processing.
It returns input types (2) and (3) unchanged.
"""
data = filepath_or_buffer
exists = False
if isinstance(data, str):
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
if exists or self.compression is not None:
data, _ = _get_handle(
filepath_or_buffer,
"r",
encoding=self.encoding,
compression=self.compression,
)
self.should_close = True
self.open_stream = data
return data
def _combine_lines(self, lines):
"""
Combines a list of JSON objects into one JSON object.
"""
lines = filter(None, map(lambda x: x.strip(), lines))
return "[" + ",".join(lines) + "]"
def read(self):
"""
Read the whole JSON input into a pandas object.
"""
if self.lines and self.chunksize:
obj = concat(self)
elif self.lines:
data = ensure_str(self.data)
obj = self._get_object_parser(self._combine_lines(data.split("\n")))
else:
obj = self._get_object_parser(self.data)
self.close()
return obj
def _get_object_parser(self, json):
"""
Parses a json document into a pandas object.
"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient,
"dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates,
"numpy": self.numpy,
"precise_float": self.precise_float,
"date_unit": self.date_unit,
}
obj = None
if typ == "frame":
obj = FrameParser(json, **kwargs).parse()
if typ == "series" or obj is None:
if not isinstance(dtype, bool):
kwargs["dtype"] = dtype
obj = SeriesParser(json, **kwargs).parse()
return obj
def close(self):
"""
If we opened a stream earlier, in _get_data_from_filepath, we should
close it.
If an open stream or file was passed, we leave it open.
"""
if self.should_close:
try:
self.open_stream.close()
except (IOError, AttributeError):
pass
def __next__(self):
lines = list(islice(self.data, self.chunksize))
if lines:
lines_json = self._combine_lines(lines)
obj = self._get_object_parser(lines_json)
# Make sure that the returned objects have the right index.
obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
self.nrows_seen += len(obj)
return obj
self.close()
raise StopIteration
class Parser:
_STAMP_UNITS = ("s", "ms", "us", "ns")
_MIN_STAMPS = {
"s": 31536000,
"ms": 31536000000,
"us": 31536000000000,
"ns": 31536000000000000,
}
def __init__(
self,
json,
orient,
dtype=None,
convert_axes=True,
convert_dates=True,
keep_default_dates=False,
numpy=False,
precise_float=False,
date_unit=None,
):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError(
"date_unit must be one of {units}".format(units=self._STAMP_UNITS)
)
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS["s"]
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"""
Checks that dict has only the appropriate keys for orient='split'.
"""
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(
"JSON data had unexpected key(s): {bad_keys}".format(
bad_keys=pprint_thing(bad_keys)
)
)
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
"""
Try to convert axes.
"""
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False, convert_dates=True
)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):
"""
Try to parse a ndarray like into a column by inferring dtype.
"""
# don't try to coerce, unless a force conversion
if use_dtypes:
if not self.dtype:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (
self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype
)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except (TypeError, ValueError):
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == "object":
# try float
try:
data = data.astype("float64")
result = True
except (TypeError, ValueError):
pass
if data.dtype.kind == "f":
if data.dtype != "float64":
# coerce floats to 64
try:
data = data.astype("float64")
result = True
except (TypeError, ValueError):
pass
# don't coerce 0-len data
if len(data) and (data.dtype == "float" or data.dtype == "object"):
# coerce ints if we can
try:
new_data = data.astype("int64")
if (new_data == data).all():
data = new_data
result = True
except (TypeError, ValueError):
pass
# coerce ints to 64
if data.dtype == "int":
# coerce any remaining ints to int64
try:
data = data.astype("int64")
result = True
except (TypeError, ValueError):
pass
return data, result
def _try_convert_to_date(self, data):
"""
Try to parse an ndarray-like into a date column.
Try to coerce object in epoch/iso formats and integer/float in epoch
formats. Return a boolean if parsing was successful.
"""
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == "object":
try:
new_data = data.astype("int64")
except (TypeError, ValueError, OverflowError):
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (
isna(new_data.values)
| (new_data > self.min_stamp)
| (new_data.values == iNaT)
)
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors="raise", unit=date_unit)
except ValueError:
continue
except Exception:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = "index"
_split_keys = ("name", "index", "data")
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = {
str(k): v
for k, v in loads(json, precise_float=self.precise_float).items()
}
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(
json, dtype=None, numpy=True, precise_float=self.precise_float
)
decoded = {str(k): v for k, v in decoded.items()}
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(
*loads(
json,
dtype=None,
numpy=True,
labelled=True,
precise_float=self.precise_float,
)
)
else:
self.obj = Series(
loads(json, dtype=None, numpy=True, precise_float=self.precise_float)
)
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
"data", self.obj, convert_dates=self.convert_dates
)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = "columns"
_split_keys = ("columns", "index", "data")
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(
json,
dtype=None,
numpy=True,
labelled=True,
precise_float=self.precise_float,
)
if len(args):
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(
json, dtype=None, numpy=True, precise_float=self.precise_float
)
decoded = {str(k): v for k, v in decoded.items()}
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(
loads(json, dtype=None, numpy=True, precise_float=self.precise_float)
)
else:
self.obj = DataFrame(
*loads(
json,
dtype=None,
numpy=True,
labelled=True,
precise_float=self.precise_float,
)
)
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None
)
elif orient == "split":
decoded = {
str(k): v
for k, v in loads(json, precise_float=self.precise_float).items()
}
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = (
DataFrame.from_dict(
loads(json, precise_float=self.precise_float),
dtype=None,
orient="index",
)
.sort_index(axis="columns")
.sort_index(axis="index")
)
elif orient == "table":
self.obj = parse_table_schema(json, precise_float=self.precise_float)
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None
)
def _process_converter(self, f, filt=None):
"""
Take a conversion function and possibly recreate the frame.
"""
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.items()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False)
)
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
"""
Return if this col is ok to try for a date parse.
"""
if not isinstance(col, str):
return False
col_lower = col.lower()
if (
col_lower.endswith("_at")
or col_lower.endswith("_time")
or col_lower == "modified"
or col_lower == "date"
or col_lower == "datetime"
or col_lower.startswith("timestamp")
):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: (
(self.keep_default_dates and is_ok(col)) or col in convert_dates
),
)
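# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of pandas): the Series/
# Frame parsers above are what back the public read_json entry point.  A
# minimal orient="split" round trip through that public API, assuming a
# standard pandas installation, looks like this:
#
#     import pandas as pd
#
#     df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
#     payload = df.to_json(orient="split")
#     restored = pd.read_json(payload, orient="split")
#     assert restored.equals(df)
# ---------------------------------------------------------------------------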
|
bsd-3-clause
|
miloharper/neural-network-animation
|
matplotlib/sankey.py
|
11
|
40247
|
#!/usr/bin/env python
"""
Module for creating Sankey diagrams using matplotlib
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
__author__ = "Kevin L. Davies"
__credits__ = ["Yannick Copin"]
__license__ = "BSD"
__version__ = "2011/09/16"
# Original version by Yannick Copin ([email protected]) 10/2/2010, available
# at:
# http://matplotlib.org/examples/api/sankey_demo_old.html
# Modifications by Kevin Davies ([email protected]) 6/3/2011:
# --Used arcs for the curves (so that the widths of the paths are uniform)
# --Converted the function to a class and created methods to join multiple
# simple Sankey diagrams
# --Provided handling for cases where the total of the inputs isn't 100
# Now, the default layout is based on the assumption that the inputs sum to
# 1. A scaling parameter can be used in other cases.
# --The call structure was changed to be more explicit about layout,
# including the length of the trunk, length of the paths, gap between the
# paths, and the margin around the diagram.
# --Allowed the lengths of paths to be adjusted individually, with an option
# to automatically justify them
# --The call structure was changed to make the specification of path
# orientation more flexible. Flows are passed through one array, with
# inputs being positive and outputs being negative. An orientation
# argument specifies the direction of the arrows. The "main"
# inputs/outputs are now specified via an orientation of 0, and there may
# be several of each.
# --Added assertions to catch common calling errors
# --Added the physical unit as a string argument to be used in the labels, so
# that the values of the flows can usually be applied automatically
# --Added an argument for a minimum magnitude below which flows are not shown
# --Added a tapered trunk in the case that the flows do not sum to 0
# --Allowed the diagram to be rotated
import numpy as np
from matplotlib.cbook import iterable, Bunch
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Affine2D
from matplotlib import verbose
from matplotlib import docstring
# Angles [deg/90]
RIGHT = 0
UP = 1
# LEFT = 2
DOWN = 3
class Sankey:
"""
Sankey diagram in matplotlib
Sankey diagrams are a specific type of flow diagram, in which
the width of the arrows is shown proportionally to the flow
quantity. They are typically used to visualize energy or
material or cost transfers between processes.
`Wikipedia (6/1/2011) <http://en.wikipedia.org/wiki/Sankey_diagram>`_
"""
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
margin=0.4, tolerance=1e-6, **kwargs):
"""
Create a new Sankey instance.
Optional keyword arguments:
=============== ===================================================
Field Description
=============== ===================================================
*ax* axes onto which the data should be plotted
If *ax* isn't provided, new axes will be created.
*scale* scaling factor for the flows
*scale* sizes the width of the paths in order to
maintain proper layout. The same scale is applied
to all subdiagrams. The value should be chosen
such that the product of the scale and the sum of
the inputs is approximately 1.0 (and the product of
the scale and the sum of the outputs is
approximately -1.0).
*unit* string representing the physical unit associated
with the flow quantities
If *unit* is None, then none of the quantities are
labeled.
*format* a Python number formatting string to be used in
labeling the flow as a quantity (i.e., a number
times a unit, where the unit is given)
*gap* space between paths that break in/break away
to/from the top or bottom
*radius* inner radius of the vertical paths
*shoulder* size of the shoulders of output arrows
*offset* text offset (from the dip or tip of the arrow)
*head_angle* angle of the arrow heads (and negative of the angle
of the tails) [deg]
*margin* minimum space between Sankey outlines and the edge
of the plot area
*tolerance* acceptable maximum of the magnitude of the sum of
flows
The magnitude of the sum of connected flows cannot
be greater than *tolerance*.
=============== ===================================================
The optional arguments listed above are applied to all subdiagrams so
that there is consistent alignment and formatting.
If :class:`Sankey` is instantiated with any keyword arguments other
than those explicitly listed above (``**kwargs``), they will be passed
to :meth:`add`, which will create the first subdiagram.
In order to draw a complex Sankey diagram, create an instance of
:class:`Sankey` by calling it without any kwargs::
sankey = Sankey()
Then add simple Sankey sub-diagrams::
sankey.add() # 1
sankey.add() # 2
#...
sankey.add() # n
Finally, create the full diagram::
sankey.finish()
Or, instead, simply daisy-chain those calls::
Sankey().add().add... .add().finish()
.. seealso::
:meth:`add`
:meth:`finish`
**Examples:**
.. plot:: mpl_examples/api/sankey_demo_basics.py
"""
# Check the arguments.
assert gap >= 0, (
"The gap is negative.\nThis isn't allowed because it "
"would cause the paths to overlap.")
assert radius <= gap, (
"The inner radius is greater than the path spacing.\n"
"This isn't allowed because it would cause the paths to overlap.")
assert head_angle >= 0, (
"The angle is negative.\nThis isn't allowed "
"because it would cause inputs to look like "
"outputs and vice versa.")
assert tolerance >= 0, (
"The tolerance is negative.\nIt must be a magnitude.")
# Create axes if necessary.
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
self.diagrams = []
# Store the inputs.
self.ax = ax
self.unit = unit
self.format = format
self.scale = scale
self.gap = gap
self.radius = radius
self.shoulder = shoulder
self.offset = offset
self.margin = margin
self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
self.tolerance = tolerance
# Initialize the vertices of tight box around the diagram(s).
self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
# If there are any kwargs, create the first subdiagram.
if len(kwargs):
self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
"""
Return the codes and vertices for a rotated, scaled, and translated
90 degree arc.
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*quadrant* uses 0-based indexing (0, 1, 2, or 3)
*cw* if True, clockwise
*center* (x, y) tuple of the arc's center
=============== ==========================================
"""
# Note: It would be possible to use matplotlib's transforms to rotate,
# scale, and translate the arc, but since the angles are discrete,
# it's just as easy and maybe more efficient to do it here.
ARC_CODES = [Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4]
# Vertices of a cubic Bezier curve approximating a 90 deg arc
# These can be determined by Path.arc(0,90).
ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
[1.00000000e+00, 2.65114773e-01],
[8.94571235e-01, 5.19642327e-01],
[7.07106781e-01, 7.07106781e-01],
[5.19642327e-01, 8.94571235e-01],
[2.65114773e-01, 1.00000000e+00],
# Insignificant
#[6.12303177e-17, 1.00000000e+00]])
[0.00000000e+00, 1.00000000e+00]])
if quadrant == 0 or quadrant == 2:
if cw:
vertices = ARC_VERTICES
else:
vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
elif quadrant == 1 or quadrant == 3:
# Negate x.
if cw:
# Swap x and y.
vertices = np.column_stack((-ARC_VERTICES[:, 1],
ARC_VERTICES[:, 0]))
else:
vertices = np.column_stack((-ARC_VERTICES[:, 0],
ARC_VERTICES[:, 1]))
if quadrant > 1:
radius = -radius # Rotate 180 deg.
return list(zip(ARC_CODES, radius * vertices +
np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
"""
Add an input to a path and return its tip and label locations.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
dipdepth = (flow / 2) * self.pitch
if angle == RIGHT:
x -= length
dip = [x + dipdepth, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, dip),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x + self.gap, y + flow])])
label_location = [dip[0] - self.offset, dip[1]]
else: # Vertical
x -= self.gap
if angle == UP:
sign = 1
else:
sign = -1
dip = [x - flow / 2, y - sign * (length - dipdepth)]
if angle == DOWN:
quadrant = 2
else:
quadrant = 1
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x + self.radius,
y - sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y - sign * length]),
(Path.LINETO, dip),
(Path.LINETO, [x - flow, y - sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=flow + self.radius,
center=(x + self.radius,
y - sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [dip[0], dip[1] - sign * self.offset]
return dip, label_location
def _add_output(self, path, angle, flow, length):
"""
Append an output to a path and return its tip and label locations.
.. note:: *flow* is negative for an output.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
tipheight = (self.shoulder - flow / 2) * self.pitch
if angle == RIGHT:
x += length
tip = [x + tipheight, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, [x, y + self.shoulder]),
(Path.LINETO, tip),
(Path.LINETO, [x, y - self.shoulder + flow]),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x - self.gap, y + flow])])
label_location = [tip[0] + self.offset, tip[1]]
else: # Vertical
x += self.gap
if angle == UP:
sign = 1
else:
sign = -1
tip = [x - flow / 2.0, y + sign * (length + tipheight)]
if angle == UP:
quadrant = 3
else:
quadrant = 0
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x - self.radius,
y + sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
"""
A path is not simply reversible by path[::-1] since the code
specifies an action to take from the **previous** point.
"""
reverse_path = []
next_code = first_action
for code, position in path[::-1]:
reverse_path.append((next_code, position))
next_code = code
return reverse_path
# This might be more efficient, but it fails because 'tuple' object
# doesn't support item assignment:
#path[1] = path[1][-1:0:-1]
#path[1][0] = first_action
#path[2] = path[2][::-1]
#return path
@docstring.dedent_interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Return value is the instance of :class:`Sankey`.
Optional keyword arguments:
=============== ===================================================
Keyword Description
=============== ===================================================
*patchlabel* label to be placed at the center of the diagram
Note: *label* (not *patchlabel*) will be passed to
the patch through ``**kwargs`` and can be used to
create an entry in the legend.
*flows* array of flow values
By convention, inputs are positive and outputs are
negative.
*orientations* list of orientations of the paths
Valid values are 1 (from/to the top), 0 (from/to
the left or right), or -1 (from/to the bottom). If
*orientations* == 0, inputs will break in from the
left and outputs will break away to the right.
*labels* list of specifications of the labels for the flows
Each value may be *None* (no labels), '' (just
label the quantities), or a labeling string. If a
single value is provided, it will be applied to all
flows. If an entry is a non-empty string, then the
quantity for the corresponding flow will be shown
below the string. However, if the *unit* of the
main diagram is None, then quantities are never
shown, regardless of the value of this argument.
*trunklength* length between the bases of the input and output
groups
*pathlengths* list of lengths of the arrows before break-in or
after break-away
If a single value is given, then it will be applied
to the first (inside) paths on the top and bottom,
and the length of all other arrows will be
justified accordingly. The *pathlengths* are not
applied to the horizontal inputs and outputs.
*prior* index of the prior diagram to which this diagram
should be connected
*connect* a (prior, this) tuple indexing the flow of the
prior diagram and the flow of this diagram which
should be connected
If this is the first diagram or *prior* is *None*,
*connect* will be ignored.
*rotation* angle of rotation of the diagram [deg]
*rotation* is ignored if this diagram is connected
to an existing one (using *prior* and *connect*).
The interpretation of the *orientations* argument
will be rotated accordingly (e.g., if *rotation*
== 90, an *orientations* entry of 1 means to/from
the left).
=============== ===================================================
Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:
%(Patch)s
As examples, ``fill=False`` and ``label='A legend entry'``.
By default, ``facecolor='#bfd1d4'`` (light blue) and
``linewidth=0.5``.
The indexing parameters (*prior* and *connect*) are zero-based.
The flows are placed along the top of the diagram from the inside out
in order of their index within the *flows* list or array. They are
placed along the sides of the diagram from the top down and along the
bottom from the outside in.
If the sum of the inputs and outputs is nonzero, the discrepancy
will appear as a cubic Bezier curve along the top and bottom edges of
the trunk.
.. seealso::
:meth:`finish`
"""
# Check and preprocess the arguments.
if flows is None:
flows = np.array([1.0, -1.0])
else:
flows = np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = [0, 0]
assert len(orientations) == n, (
"orientations and flows must have the same length.\n"
"orientations has length %d, but flows has length %d."
% (len(orientations), n))
if labels != '' and getattr(labels, '__iter__', False):
# iterable() isn't used because it would give True if labels is a
# string
assert len(labels) == n, (
"If labels is a list, then labels and flows must have the "
"same length.\nlabels has length %d, but flows has length %d."
% (len(labels), n))
else:
labels = [labels] * n
assert trunklength >= 0, (
"trunklength is negative.\nThis isn't allowed, because it would "
"cause poor layout.")
if np.absolute(np.sum(flows)) > self.tolerance:
verbose.report(
"The sum of the flows is nonzero (%f).\nIs the "
"system not at steady state?" % np.sum(flows), 'helpful')
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if not (0.5 <= gain <= 2.0):
verbose.report(
"The scaled sum of the inputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if not (-2.0 <= loss <= -0.5):
verbose.report(
"The scaled sum of the outputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if prior is not None:
assert prior >= 0, "The index of the prior diagram is negative."
assert min(connect) >= 0, (
"At least one of the connection indices is negative.")
assert prior < len(self.diagrams), (
"The index of the prior diagram is %d, but there are "
"only %d other diagrams.\nThe index is zero-based."
% (prior, len(self.diagrams)))
assert connect[0] < len(self.diagrams[prior].flows), (
"The connection index to the source diagram is %d, but "
"that diagram has only %d flows.\nThe index is zero-based."
% (connect[0], len(self.diagrams[prior].flows)))
assert connect[1] < n, (
"The connection index to this diagram is %d, but this diagram"
"has only %d flows.\n The index is zero-based."
% (connect[1], n))
assert self.diagrams[prior].angles[connect[0]] is not None, (
"The connection cannot be made. Check that the magnitude "
"of flow %d of diagram %d is greater than or equal to the "
"specified tolerance." % (connect[0], prior))
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
assert abs(flow_error) < self.tolerance, (
"The scaled sum of the connected flows is %f, which is not "
"within the tolerance (%f)." % (flow_error, self.tolerance))
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
verbose.report(
"The magnitude of flow %d (%f) is below the "
"tolerance (%f).\nIt will not be shown, and it "
"cannot be used in a connection."
% (i, flow, self.tolerance), 'helpful')
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
if orient == 1:
if is_input:
angles[i] = DOWN
elif not is_input:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
assert orient == -1, (
"The value of orientations[%d] is %d, "
"but it must be -1, 0, or 1." % (i, orient))
if is_input:
angles[i] = UP
elif not is_input:
angles[i] = DOWN
# Justify the lengths of the paths.
if iterable(pathlengths):
assert len(pathlengths) == n, (
"If pathlengths is a list, then pathlengths and flows must "
"have the same length.\npathlengths has length %d, but flows "
"has length %d." % (len(pathlengths), n))
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
scaled_flows)):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and not is_input:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and not is_input:
pathlengths[n - i - 1] = lrlength
lrlength -= flow
# Determine the lengths of the left-side arrows
# from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, zip(scaled_flows, pathlengths))))):
if angle == RIGHT:
if is_input:
if has_left_input:
pathlengths[n - i - 1] = 0
else:
has_left_input = True
# Determine the lengths of the right-side arrows
# from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT:
if not is_input:
if has_right_output:
pathlengths[i] = 0
else:
has_right_output = True
# Begin the subpaths, and smooth the transition if the sum of the flows
# is nonzero.
urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
gain / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
gain / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap),
-loss / 2.0])]
llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
loss / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
loss / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0),
-gain / 2.0])]
lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
loss / 2.0])]
ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
gain / 2.0])]
# Add the subpaths and assign the locations of the tips and labels.
tips = np.zeros((n, 2))
label_locations = np.zeros((n, 2))
# Add the top-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == DOWN and is_input:
tips[i, :], label_locations[i, :] = self._add_input(
ulpath, angle, *spec)
elif angle == UP and not is_input:
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Add the bottom-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == UP and is_input:
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
elif angle == DOWN and not is_input:
tip, label_location = self._add_output(lrpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the left-side inputs from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT and is_input:
if not has_left_input:
# Make sure the lower path extends
# at least as far as the upper one.
if llpath[-1][1][0] > ulpath[-1][1][0]:
llpath.append((Path.LINETO, [ulpath[-1][1][0],
llpath[-1][1][1]]))
has_left_input = True
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the right-side outputs from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT and not is_input:
if not has_right_output:
# Make sure the upper path extends
# at least as far as the lower one.
if urpath[-1][1][0] < lrpath[-1][1][0]:
urpath.append((Path.LINETO, [lrpath[-1][1][0],
urpath[-1][1][1]]))
has_right_output = True
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Trim any hanging vertices.
if not has_left_input:
ulpath.pop()
llpath.pop()
if not has_right_output:
lrpath.pop()
urpath.pop()
# Concatenate the subpaths in the correct order (clockwise from top).
path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
[(Path.CLOSEPOLY, urpath[0][1])])
# Create a patch with the Sankey outline.
codes, vertices = list(zip(*path))
vertices = np.array(vertices)
def _get_angle(a, r):
if a is None:
return None
else:
return a + r
if prior is None:
if rotation != 0: # By default, none of this is needed.
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_point
tips = rotate(tips)
label_locations = rotate(label_locations)
vertices = rotate(vertices)
text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
else:
rotation = (self.diagrams[prior].angles[connect[0]] -
angles[connect[1]])
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_point
tips = rotate(tips)
offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
translate = Affine2D().translate(*offset).transform_point
tips = translate(tips)
label_locations = translate(rotate(label_locations))
vertices = translate(rotate(vertices))
kwds = dict(s=patchlabel, ha='center', va='center')
text = self.ax.text(*offset, **kwds)
if False: # Debug
print("llpath\n", llpath)
print("ulpath\n", self._revert(ulpath))
print("urpath\n", urpath)
print("lrpath\n", self._revert(lrpath))
xs, ys = list(zip(*vertices))
self.ax.plot(xs, ys, 'go-')
patch = PathPatch(Path(vertices, codes),
fc=kwargs.pop('fc', kwargs.pop('facecolor',
'#bfd1d4')), # Custom defaults
lw=kwargs.pop('lw', kwargs.pop('linewidth', 0.5)),
**kwargs)
self.ax.add_patch(patch)
# Add the path labels.
texts = []
for number, angle, label, location in zip(flows, angles, labels,
label_locations):
if label is None or angle is None:
label = ''
elif self.unit is not None:
quantity = self.format % abs(number) + self.unit
if label != '':
label += "\n"
label += quantity
texts.append(self.ax.text(x=location[0], y=location[1],
s=label,
ha='center', va='center'))
# Text objects are placed even if they are empty (as long as the magnitude
# of the corresponding flow is larger than the tolerance) in case the
# user wants to provide labels later.
# Expand the size of the diagram if necessary.
self.extent = (min(np.min(vertices[:, 0]),
np.min(label_locations[:, 0]),
self.extent[0]),
max(np.max(vertices[:, 0]),
np.max(label_locations[:, 0]),
self.extent[1]),
min(np.min(vertices[:, 1]),
np.min(label_locations[:, 1]),
self.extent[2]),
max(np.max(vertices[:, 1]),
np.max(label_locations[:, 1]),
self.extent[3]))
# Include both vertices _and_ label locations in the extents; there are
# cases where either could determine the margins (e.g., arrow shoulders).
# Add this diagram as a subdiagram.
self.diagrams.append(Bunch(patch=patch, flows=flows, angles=angles,
tips=tips, text=text, texts=texts))
# Allow a daisy-chained call structure (see docstring for the class).
return self
def finish(self):
"""
Adjust the axes and return a list of information about the Sankey
subdiagram(s).
Return value is a list of subdiagrams represented with the following
fields:
=============== ===================================================
Field Description
=============== ===================================================
*patch* Sankey outline (an instance of
:class:`~matplotlib.patches.PathPatch`)
*flows* values of the flows (positive for input, negative
for output)
*angles* list of angles of the arrows [deg/90]
For example, if the diagram has not been rotated,
an input to the top side will have an angle of 3
(DOWN), and an output from the top side will have
an angle of 1 (UP). If a flow has been skipped
(because its magnitude is less than *tolerance*),
then its angle will be *None*.
*tips* array in which each row is an [x, y] pair
indicating the positions of the tips (or "dips") of
the flow paths
If the magnitude of a flow is less than the *tolerance*
for the instance of :class:`Sankey`, the flow is
skipped and its tip will be at the center of the
diagram.
*text* :class:`~matplotlib.text.Text` instance for the
label of the diagram
*texts* list of :class:`~matplotlib.text.Text` instances
for the labels of flows
=============== ===================================================
.. seealso::
:meth:`add`
"""
self.ax.axis([self.extent[0] - self.margin,
self.extent[1] + self.margin,
self.extent[2] - self.margin,
self.extent[3] + self.margin])
self.ax.set_aspect('equal', adjustable='datalim')
return self.diagrams
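# Editor's usage sketch (not part of the original module): a single
# subdiagram with three inputs and five outputs, in the spirit of the basic
# demo referenced in the class docstring.  Assumes an interactive matplotlib
# backend is available.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
           labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
           orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()
    plt.title("Basic Sankey diagram")
    plt.show()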
|
mit
|
soylentdeen/Graffity
|
src/ParabolicMirror/gridsearch.py
|
1
|
1218
|
import scipy
import numpy
import matplotlib.pyplot as pyplot
import astropy.io.fits as pyfits
import glob
datadir = '/home/cdeen/Data/CIAO/UT3/Alignment/2016-08-31_3/PARABOLA_SEARCH-190246/'
files = glob.glob(datadir+'*.fits')
scan = []
flux = []
scans = []
for f in files:
data = pyfits.getdata(f).field('Intensities')
header = pyfits.getheader(f)
scan.append(header.get('ESO TPL EXPNO'))
flux.append(numpy.max(numpy.mean(data, axis=1)))
scans.append(numpy.mean(data, axis=1))
middle = numpy.array([1717503, 1667465])
corner = middle - numpy.array([250000, 250000])
step = 5000
scan = numpy.array(scan)
order = numpy.argsort(scan)
scan = scan[order]*step+corner[0]
scans = numpy.array(scans)[order]
flux = numpy.array(flux)[order]
fig = pyplot.figure(0)
fig.clear()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
for s in scans[40:50]:
ax.plot(s)
#ax.plot(scan, flux)
ax.set_ylabel("Average Subaperture Intensity")
ax.set_xlabel("Frame number")
ax.set_xbound(3200, 3500)
fig.show()
fig.savefig("Scan40to50.png")
raw_input()
ax.clear()
ax.plot(scan, flux)
ax.set_xlabel('Scan number')
ax.set_ylabel('Brightest Frame')
ax.get_xaxis().get_major_formatter().set_useOffset(False)
fig.show()
|
mit
|
igorcoding/os-simulation
|
src/simulation.py
|
1
|
2801
|
# coding=utf-8
import simpy
import pylab
from src.sim.os_simulator import OsSimulator
from src.stats.global_stats import GlobalStats
class Simulation(object):
def __init__(self):
super(Simulation, self).__init__()
@staticmethod
def _experiment(stats, simulation_time, **params):
env = simpy.Environment()
simulator = OsSimulator(env, stats, **params)
simulator.start()
env.run(until=simulation_time)
del simulator
@staticmethod
def _generate_configs(data):
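        # Builds one simulation config per (buffer_latency, gen_lambda) pair,
        # i.e. the Cartesian product of the two swept parameters (editor's note).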
configs = []
for d in data['buffer_latency']:
for l in data['gen_lambda']:
config = dict(delta=data['delta'], buffer_size=data['buffer_size'] - 1, buffer_latency=d,
gen_lambda=l, time_distrib=data['time_distrib'])
configs.append(config)
return configs
def simulation(self, **data):
results = GlobalStats()
configs = self._generate_configs(data)
for i, conf in enumerate(configs):
print "Running configuration %d/%d" % (i+1, len(configs))
bulk_stats = results.get_new_bulk_stats(**conf)
for j in xrange(data['exp_per_conf']):
print "%d/%d" % (j+1, data['exp_per_conf']),
stats = bulk_stats.get_new_stats()
self._experiment(stats, data['sim_time'], **conf)
print
pylab.matplotlib.rc('font', family='Arial')
pylab.clf()
for d in data['buffer_latency']:
plot_total = results.get_avg_total_time_vs_lambda(d)
plot_inner = results.get_avg_inner_time_vs_lambda(d)
plots = [
dict(title=u'Average total job residence time (external queue + system)', file='avg_total.png', points=plot_total),
dict(title=u'Average job residence time in the system', file='avg_inner.png', points=plot_inner),
]
for i, p in enumerate(plots):
pylab.figure(i)
pylab.plot(*zip(*p['points']), label='d = %d' % d)
pylab.legend()
pylab.xlabel(u'λ')
pylab.ylabel(u'Time')
pylab.grid(True)
pylab.title(p['title'])
pylab.savefig(p['file'])
pylab.close()
del results
def main():
data = {
'sim_time': 1000,
'exp_per_conf': 1,
'buffer_latency': xrange(1, 22, 5),
'gen_lambda': xrange(1, 101),
'buffer_size': 5,
'delta': 9,
'time_distrib': dict(mu=40, sigma=10)
}
s = Simulation()
s.simulation(**data)
if __name__ == "__main__":
main()
|
mit
|
nguyentu1602/statsmodels
|
statsmodels/sandbox/nonparametric/tests/ex_smoothers.py
|
33
|
1413
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 04 10:51:39 2011
@author: josef
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from statsmodels.sandbox.nonparametric import smoothers, kernels
from statsmodels.regression.linear_model import OLS, WLS
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 100
lb, ub = -1, 2
x = np.linspace(lb, ub, nobs)
x = np.sin(x)
exog = x[:,None]**np.arange(order+1)
y_true = exog.sum(1)
y = y_true + sigma_noise * np.random.randn(nobs)
#xind = np.argsort(x)
pmod = smoothers.PolySmoother(2, x)
pmod.fit(y) #no return
y_pred = pmod.predict(x)
error = y - y_pred
mse = (error*error).mean()
print(mse)
res_ols = OLS(y, exog[:,:3]).fit()
print(np.squeeze(pmod.coef) - res_ols.params)
weights = np.ones(nobs)
weights[:nobs//3] = 0.1
weights[-nobs//5:] = 2
pmodw = smoothers.PolySmoother(2, x)
pmodw.fit(y, weights=weights) #no return
y_predw = pmodw.predict(x)
error = y - y_predw
mse = (error*error).mean()
print(mse)
res_wls = WLS(y, exog[:,:3], weights=weights).fit()
print(np.squeeze(pmodw.coef) - res_wls.params)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.plot(y, '.')
plt.plot(y_true, 'b-', label='true')
plt.plot(y_pred, '-', label='poly')
plt.plot(y_predw, '-', label='poly -w')
plt.legend(loc='upper left')
plt.close()
#plt.show()
|
bsd-3-clause
|
AsimmHirani/ISpyPi
|
tensorflow/contrib/tensorflow-master/tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
|
18
|
13185
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import tempfile
# pylint: disable=g-bad-todo
# TODO(#6568): Remove this hack that makes dlopen() not crash.
# pylint: enable=g-bad-todo
# pylint: disable=g-import-not-at-top
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
bzamecnik/ml
|
snippets/keras/lstm_char_rnn/lstm_text_generation.py
|
2
|
5412
|
'''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
Source: https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py
License: MIT (see https://github.com/fchollet/keras/blob/master/LICENSE)
'''
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
from keras.utils.np_utils import to_categorical
import numpy as np
import random
from sklearn.preprocessing import LabelEncoder
import sys
class Dataset:
def __init__(self, frame_size=40, hop_size=3):
self.frame_size = frame_size
self.hop_size = hop_size
path = get_file('nietzsche.txt',
origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
self.text = open(path).read().lower()
print('corpus length:', len(self.text))
chars = sorted(list(set(self.text)))
self.class_count = len(chars)
print('total chars:', self.class_count)
self.le = LabelEncoder().fit(chars)
self.text_ohe = self.text_to_ohe(self.text)
def split_to_frames(values, frame_size, hop_size):
"""
Split to overlapping frames.
"""
return np.stack([values[i:i + frame_size] for i in
range(0, len(values) - frame_size + 1, hop_size)])
def split_features_targets(frames):
"""
Split each frame to features (all but last element)
and targets (last element).
"""
frame_size = frames.shape[1]
X = frames[:, :frame_size - 1]
y = frames[:, -1]
return X, y
# cut the text in semi-redundant sequences of frame_size characters
self.X, self.y = split_features_targets(split_to_frames(
self.text_ohe, frame_size + 1, hop_size))
print('X.shape:', self.X.shape, 'y.shape:', self.y.shape)
def ohe_to_text(self, text_ohe):
return self.le_to_text(text_ohe.argmax(axis=1))
def text_to_ohe(self, text):
return self.le_to_ohe(self.text_to_le(list(text)))
def le_to_text(self, text_le):
return ''.join(self.le.inverse_transform(text_le))
def text_to_le(self, text):
return self.le.transform(text)
def le_to_ohe(self, text_le):
return to_categorical(text_le, nb_classes=self.class_count)
class Model:
def __init__(self, dataset):
self.dataset = dataset
def create_model(dataset):
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(dataset.frame_size, dataset.class_count)))
model.add(Dense(dataset.class_count))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
optimizer=optimizer)
return model
self.model = create_model(self.dataset)
def fit_with_preview(self):
# output generated text after each iteration
preview_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: self.preview())
self.model.fit(
self.dataset.X, self.dataset.y,
batch_size=1000, nb_epoch=60,
callbacks=[preview_callback])
def generate_chars(self, seed_text, length, temperature=1.0):
def sample(preds, temperature):
# helper function to sample an index from a probability array
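            # Editor's note: temperature < 1 sharpens the distribution (more
            # conservative, repetitive text), temperature > 1 flattens it
            # (more random text); temperature == 1 samples from the model's
            # raw probabilities.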
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
window_ohe = self.dataset.text_to_ohe(seed_text)
for i in range(length):
# single data point
x = window_ohe[np.newaxis]
probs = self.model.predict(x, verbose=0)[0]
next_index = sample(probs, temperature)
yield self.dataset.le_to_text(next_index)
next_ohe = self.dataset.le_to_ohe([next_index])
window_ohe = np.vstack([window_ohe[1:], next_ohe])
def preview(self, seed_text=None, length=100):
if seed_text is None:
start_index = random.randint(0, len(self.dataset.text) - self.dataset.frame_size - 1)
seed_text = self.dataset.text[start_index:start_index + self.dataset.frame_size]
print()
print('----- Generating with seed: "' + seed_text + '"')
for temperature in [0.2, 0.5, 1.0, 1.2]:
print('----- temperature:', temperature)
for char in self.generate_chars(seed_text, length, temperature):
sys.stdout.write(char)
sys.stdout.flush()
print()
if __name__ == '__main__':
dataset = Dataset()
model = Model(dataset)
model.fit_with_preview()
|
mit
|
joshloyal/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
112
|
1819
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
bsd-3-clause
|
rvelseg/string_resonance
|
string_resonance.py
|
1
|
12954
|
from pylab import plot, show, subplots
from numpy import arange, pi, ma, tanh, sin, fft, zeros
from matplotlib import animation
from detect_peaks import detect_peaks
__author__ = "R. Velasco-Segura and Pablo L. Rendon"
__version__ = "0.2"
__license__ = "BSD"
# ------------------------------------------------------
# ------- configurable parameters
ampl = 1 # amplitude of the emitted wave
L = 3.0 # resonator length in meters
x_max = 4.0 # domain length in meters
Nx = 50 # number of grid points
dx = x_max/(Nx - 1.0) # *don't change* # spatial step in meters
t_max = 1.0 # final time in seconds
f_ini = 100.0 # initial frequency in Hz
f_fin = 300.0 # final frequency in Hz
c = 343.0 # speed of sound in m/s
# 343 m/s is the typical speed of sound in air, not necessarily the wave
# speed on a string. However, it is not that different from typical
# values for guitar strings.
mic_pos = x_max/8.0 # microphone position in meters
# Chirp type. The possible values this variable can take are:
#
# "exp" : stands for exponential, which sometimes is also called
# logarithmic chirp.
#
# "lin" : linear chirp.
#
chirp_type = "exp"
# Temporal step. The choice dt=dx/c has some advantages. However, be
# aware that in this case the obstacle modelled as a pure harmonic
# oscillator is not usable: small values of KoM have no effect, and
# large values of KoM make the numerical scheme unstable, with no
# intermediate region. When the obstacle is a harmonic oscillator only,
# with no extra mass, dt=0.9*dx/c works fine.
dt = 1*dx/c
# Output type. The possible values this variable can take are:
#
# "file" : The script creates a video file. You probably will have to
# adjust the writer to something that works on your system.
#
# "fly" : A visualization is displayed on the fly, as data is been
# generated.
#
output = "fly"
# ---------------------------------------------------
# ------- calculated and fixed parameters, ---------
# ------- don't change these parameters.
t_min = 0.0 # initial time in seconds
w = 2.0*pi*f_ini # current value of angular frequency
phi = 0.0 # phase correction
s_fc = 1.0
n = 0 # current step
init_sim = 0
x = arange(0,x_max+dx/2.0,dx) # spatial domain
cdtdx2 = (c*dt/dx)**2
dt2 = dt*dt
t_axis = arange(t_min,t_max+dt/2.0,dt) # temporal domain
mic_i = int(round(mic_pos/dx)) # microphone position index
obs_i = int(round(L/dx)) # obstacle position index
# ----------------------------------------------------
y_next = zeros(Nx) # solution at t
y_now = y_next.copy() # solution at t-dt
y_prev = y_now.copy() # solution at t-2*dt
mic = []
mic_t = []
am_min = []
am_max = []
am = []
am_s = []
f_ax = []
fft_spec = []
fft_axis = []
fft_axis_t = []
fft_peaks = []
fig, ax = subplots(3)
def init_y() :
global y_now, y_prev, x
for i in range(0,len(x)) :
y_now[i] = 0
y_prev[i] = 0
def step_y() :
global y_next, y_now, y_prev
global dt2, cdtdx2, dx
global obs_i
for i in range(1, len(y_next)-1) :
y_next[i] = (- y_prev[i] +
2 * y_now[i] +
cdtdx2 * (y_now[i-1] - 2*y_now[i] + y_now[i+1]) )
# Obstacle :
i = obs_i
# # harmonic oscillator
# KoM = 1e5 # this is k/m
# y_next[i] -= (dt2/dx)*KoM*y_now[i]
# extra mass
MM = 5.0
kappa = 4e5
k = kappa/dx
y_next[i] += (dt2*(k/MM) - cdtdx2) * (y_now[i-1] - 2*y_now[i] + y_now[i+1])
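# The interior update above is the standard second-order finite-difference
# scheme for the 1-D wave equation,
#     y(t+dt)[i] = 2*y(t)[i] - y(t-dt)[i] + (c*dt/dx)**2 * (y[i-1] - 2*y[i] + y[i+1]),
# with the coefficient at obs_i modified to model an extra lumped mass MM
# coupled through the stiffness kappa.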
def boundary_y(t) :
global y_now, y_prev
global x, w, phi, ampl
# absorbing boundary
y_now[-1] = y_prev[-2]
# forcing boundary
y_now[0] = ( ampl *
sin( w * (t - x[0]) + phi) *
( 0.5 + 0.5 * tanh( t * 1715 - 4 ) ) )
def pp(x,t) :
# This function is used to draw a sine wave of the current emitted
# frequency, to be compared with y_now.
global w, phi
ret = ( ampl *
sin( w * (t - x) + phi) *
( 0.5 + 0.5 * tanh( t * 1715 - 4 ) ) )
return ret
def get_mic(pos, t) :
global y_now
global mic, mic_t
mic.append(y_now[pos])
mic_t.append(t)
# static like variables for this function
# norm = 0
# prev_s = 1
# prev_dd = 0
def get_am(pos, t) :
global am_max, am_min
global w, dt, mic
# global s_fc
# global norm, prev_s, prev_dd
global f_fin
ii = len(mic)
period = 2*pi/w
i_p = int(period/dt)
i_p_m = max(0, int(ii - 5*i_p))
# i_p_m2 = max(0, int(ii - 10*i_p))
am_min.append(min(mic[i_p_m:]))
am_max.append(max(mic[i_p_m:]))
am.append(am_max[-1] - am_min[-1])
f_ax.append(w/(2*pi))
# dd = am[-1] - 2*am[i_p_m] + am[i_p_m2]
# print dd
# print am[-1], am[i_p_m]
# t_adp_start = t_min + (t_max-t_min)/10
# if (t > t_adp_start ) :
# if norm == 0 :
# norm = abs(am[-1] - am[i_p_m])/am[-1]
# m = abs(am[-1] - am[i_p_m])/(am[-1]*0.5*norm)
# m = min(5,m)
# # print "{:.5f}".format(m)
# if (am[-1] >= am[i_p_m]) :
# s_fc = 1
# else :
# s_fc = 1
# if prev_s != abs(s_fc)/s_fc :
# prev_s *= -1
# # f_fin = w/(2*pi)
def animate(t):
global x, w, phi, ampl, f_fin, dt
global s_fc
global y_next, y_now, y_prev
global cdtdx2
global n
global mic_t, mic
global fft_spec, fft_axis, fft_axis_t, fft_peaks
# TODO: This approach accumulates error; the frequency could be computed
# directly at each step instead of from its previous value.
if chirp_type == "exp" :
wnew = w * ( f_fin / f_ini )**(s_fc*dt/t_max)
elif chirp_type == "lin" :
# linear chirp gives trouble at low frequencies
if ( w > pi ) | ( s_fc > 0 ) :
wnew = w + s_fc*dt*2.0*pi*(f_fin-f_ini)/t_max;
else :
print "Low frequency bound reached."
wnew = w
else :
print "ERROR: chirp_type not recognized."
return
phi += t*(w-wnew)
w = wnew
boundary_y(t)
step_y()
if ( len(mic) > 10 ) :
if ( n%30 == 0 ) :
fft_norm = 2.0/len(mic_t)
fft_spec = abs(fft_norm * fft.fft(mic))[:len(mic_t)/2]
fft_axis = fft.fftfreq(len(mic_t), dt)[:len(mic_t)/2]
# mph stands for minimum peak height
fft_peaks = detect_peaks(fft_spec, mph=ampl/4.0)
else :
fft_spec = zeros(10)
fft_axis = range(0,10)
get_mic(mic_i, t)
get_am(mic_i, t)
n += 1
if n%100 == 0 :
print t
# Change the value used in the modulo operation below according to how
# often you want to refresh the plot.
if n%1 == 0 :
texts[0].set_text("t = " + "{:.4f}".format(t) + " s")
texts[1].set_text("current emitted frequency = " + "{:.4f}".format(w/(2*pi)) + " Hz")
texts[2].set_text("current step = " + str(n))
# texts[3].set_text("s_fc = " + str(s_fc))
peaks_str = ""
for peak_i in fft_peaks :
peaks_str += "{:.2f}, ".format(fft_axis[peak_i])
peaks_str = "FFT peaks [Hz]: " + peaks_str[:-2]
texts[9].set_text(peaks_str)
# The solution 'y_next' doesn't have the boundary points; that is
# why 'y_now' is plotted.
lines[0].set_ydata(y_now)
# lines[1].set_ydata(pp(x,t))
lines[2].set_xdata(mic_t)
lines[2].set_ydata(mic)
lines[3].set_xdata(mic_t)
lines[3].set_ydata(am_min)
lines[4].set_xdata(mic_t)
lines[4].set_ydata(am_max)
lines[5].set_xdata(mic_t)
lines[5].set_ydata(am)
scatters[1].set_offsets([[obs_i*dx],[y_now[obs_i]]])
lines[6].set_xdata(f_ax)
lines[6].set_ydata(am)
lines[7].set_xdata(fft_axis)
lines[7].set_ydata(5*fft_spec)
y_prev = y_now.copy()
y_now = y_next.copy()
# You could return tuple(ax) but it makes the simulation slower.
return tuple(lines) + tuple(texts) + tuple(scatters)
def init_ani() :
global init_sim
# When the window is resized init_ani is called again, but we
# don't want our simulation to return to the initial state.
if init_sim == 0 :
init_y()
init_sim += 1
# It is necessary to do this cleanup, otherwise objects get
# overlapped.
for line in lines :
line.set_ydata(ma.array(line.get_xdata(), mask=True))
for scatter in scatters :
scatter.set_offsets([[],[]])
for text in texts :
text.set_text('')
# constant values of the objects to be animated
lines[6].set_linestyle('--')
lines[7].set_linestyle('-')
lines[7].set_marker('+')
scatters[0].set_offsets([[mic_pos],[0]])
texts[0].set_transform(ax[0].transAxes)
texts[0].set_x(0.1)
texts[0].set_y(0.9)
texts[0].set_va('center')
texts[1].set_transform(ax[0].transAxes)
texts[1].set_x(0.4)
texts[1].set_y(0.9)
texts[1].set_va('center')
texts[2].set_transform(ax[0].transAxes)
texts[2].set_x(0.1)
texts[2].set_y(0.75)
texts[2].set_va('center')
texts[3].set_transform(ax[0].transData)
texts[3].set_x(mic_i*dx)
texts[3].set_y(-2*ampl)
texts[3].set_va('center')
texts[3].set_text('measuring point, aka microphone')
texts[4].set_transform(ax[0].transData)
texts[4].set_x(obs_i*dx)
texts[4].set_y(-2*ampl)
texts[4].set_va('center')
texts[4].set_text('obstacle')
texts[5].set_transform(ax[1].transAxes)
texts[5].set_x(0.1)
texts[5].set_y(0.8)
texts[5].set_va('center')
texts[5].set_text('Mic signal, upper envelope, lower envelope, and amplitude.')
texts[6].set_transform(ax[1].transAxes)
texts[6].set_x(0.1)
texts[6].set_y(0.65)
texts[6].set_va('center')
texts[6].set_text('Amplitude is the difference of the upper and lower envelopes.')
texts[7].set_transform(ax[2].transAxes)
texts[7].set_x(0.1)
texts[7].set_y(0.8)
texts[7].set_va('center')
texts[7].set_text('Dashed: Amplitude as function of the current emitted freq.')
texts[8].set_transform(ax[2].transAxes)
texts[8].set_x(0.1)
texts[8].set_y(0.65)
texts[8].set_va('center')
texts[8].set_text('Solid: 5 * FFT of the current signal')
texts[9].set_transform(ax[2].transAxes)
texts[9].set_x(0.1)
texts[9].set_y(0.5)
texts[9].set_va('center')
# not used
texts[10].set_transform(ax[2].transAxes)
texts[10].set_x(0.1)
texts[10].set_y(0.5)
texts[10].set_va('center')
return tuple(lines) + tuple(texts) + tuple(scatters)
# Create empty objects to be animated
lines = []
# first subplot
line = ax[0].plot(x,y_now)[0] # this particular object cannot be empty :S
lines.append(line)
line = ax[0].plot([],[])[0]
lines.append(line)
# second subplot
line = ax[1].plot([],[])[0]
lines.append(line)
line = ax[1].plot([],[])[0]
lines.append(line)
line = ax[1].plot([],[])[0]
lines.append(line)
line = ax[1].plot([],[])[0]
lines.append(line)
# third subplot
line = ax[2].plot([],[])[0]
lines.append(line)
line = ax[2].plot([],[])[0]
lines.append(line)
scatters = []
scatter = ax[0].scatter([],[],s=10)
scatters.append(scatter)
scatter = ax[0].scatter([],[],s=10)
scatters.append(scatter)
texts = []
text = ax[0].text([], [], '')
texts.append(text)
text = ax[0].text([], [], '')
texts.append(text)
text = ax[0].text([], [], '')
texts.append(text)
text = ax[0].text([], [], '')
texts.append(text)
text = ax[0].text([], [], '')
texts.append(text)
text = ax[1].text([], [], '')
texts.append(text)
text = ax[1].text([], [], '')
texts.append(text)
text = ax[2].text([], [], '')
texts.append(text)
text = ax[2].text([], [], '')
texts.append(text)
text = ax[2].text([], [], '')
texts.append(text)
text = ax[2].text([], [], '')
texts.append(text)
ax[0].set_ylim( (-ampl*10,ampl*10) )
ax[0].set_xlim( 0,x[-1] )
ax[0].set_xticks( (0,L) )
ax[0].set_xlabel("x [m]",labelpad=-10)
ax[1].set_ylim( (-ampl*10,ampl*30) )
ax[1].set_xlim( 0,t_max )
ax[1].set_xlabel("time [s]",labelpad=-10)
ax[2].set_ylim( (0,ampl*30) )
ax[2].set_xlim( f_ini,f_fin )
ax[2].set_xlabel("frequency [Hz]",labelpad=0)
ani = animation.FuncAnimation(fig, animate, arange(t_min, t_max, dt),
repeat=False, init_func=init_ani,
interval=10, blit=True)
# mng = fig.canvas.manager
# # To maximize the animation window you can try the following
# # lines. Each of them could work, or not, depending on your
# # system. I didn't manage to make this work when generating a video
# # file.
# ##################
# # mng.frame.Maximize(True)
# # mng.window.showMaximized()
# # mng.window.state('zoomed')
# mng.resize(*mng.window.maxsize()) # tkagg
# ##################
if output == "fly" :
show()
elif output == "file" :
ani.save("string.mp4", writer="avconv", fps=25)
else :
print "ERROR: output type not recognized."
|
bsd-2-clause
|
ntu-dsi-dcn/ntu-dsi-dcn
|
src/flow-monitor/examples/wifi-olsr-flowmon.py
|
27
|
7354
|
# -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(1)))
onOffHelper.SetAttribute("OffTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(0)))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
app.Start(ns.core.Seconds(ns.core.UniformVariable(20, 30).GetValue()))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
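# Usage sketch (assumption: the ns-3 Python bindings are built; the exact
# launcher depends on the ns-3 version, e.g. "./waf --pyrun <this script>"):
#   wifi-olsr-flowmon.py [--NumNodesSide=N] [--Results=out.xml] [--Plot=1]
# --NumNodesSide sets the grid side (N*N nodes in total), --Results writes
# the FlowMonitor statistics to an XML file, and --Plot shows a histogram
# of mean per-flow delays via pylab.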
|
gpl-2.0
|
mattcieslak/DSI2
|
dsi2/aggregation/region_clusters.py
|
1
|
17921
|
#!/usr/bin/env python
import os
import numpy as np
from ..streamlines.track_dataset import RegionCluster, TrackDataset
from .cluster_ui import ClusterEditor
from ..streamlines.track_math import tracks_to_endpoints
from ..database.track_datasource import TrackDataSource
import matplotlib.pyplot as plt
from dipy.tracking import metrics as tm
from dipy.tracking import distances as td
from traits.api import HasTraits, Instance, Array, Enum, \
Str, File, on_trait_change, Bool, Dict, Range, Color, List, Int, \
Property, Button, DelegatesTo, on_trait_change, Str, Tuple
from traitsui.api import View, Group, Item, RangeEditor, EnumEditor, OKButton, CancelButton
from ..streamlines.track_math import region_pair_dict_from_roi_list
import networkx as nx
graphml_lookup = {
"scale33":"resolution83", "scale60":"resolution150",
"scale125":"resolution258", "scale250":"resolution500",
"scale500":"resolution1015"
}
class TerminationPatternPlotter(HasTraits):
pass
class RegionAggregator(ClusterEditor):
# Maps roi integer to a name
region_labels = Dict
# Maps pairs of roi integers to an index
region_pairs_to_index = Dict
index_to_region_pairs = Dict
regions = Array
# will a connection be allowed if it appears in ANY or ALL subjects?
across_subject_comparison_operation = Enum("Union","Intersection")
parameters = ["min_tracks"]
min_tracks = Range(low=0,high=100,value=1, auto_set=False,name="min_tracks",
desc="A cluster label must be assigned to at least this many tracks",
label="Minimum tracks",
parameter=True
)
atlas_name = Str("None",parameter=True)
# Buttons for the algorithm_widgets
b_plot_connection_vector = Button(label="Connection Vectors")
b_query_region_pairs = Button(label="Query a Region Pair")
b_change_postproc = Button(label="Change PostProcessor")
# Previously from "Atlas"
possible_atlases = List
atlas_graphml = File
connection_vector_plot_type = Enum("lines","imshow")
def _b_plot_connection_vector_fired(self):
"""Listen for button clicks"""
self.plot_connection_vectors()
@on_trait_change("atlas_name")
def set_atlas(self,atlas_name):
"""
Loads a numpy array from disk based on the string `atlas_name`.
Parameters
----------
atlas_name : Str
Must exist as a key in the TrackDataset.properties.atlases of each subject
Does:
-----
* Reads the graphml file from CMP corresponding to `atlas_name`
* Builds lookup tables for connection_id -> (region1, region2)
* Builds lookup tables for connection_id -> ("region1", region2)
"""
if not len(self.track_sets): return
self.atlas_name = atlas_name
# only use the first part of the atlas name to get lausanne labels
if not all([atlas_name in tds.properties.atlases.keys() for tds in self.track_sets]):
print "WARNING: Not all TrackDatasets have ", atlas_name
return
# Set the .connections attribute on each TrackDataset
for tds in self.track_sets:
tds.load_connections(self.atlas_name)
# =====================================================================================
# IF there is a graphml, load it to get the region names
self.atlas_graphml = self.track_sets[0].properties.atlases[atlas_name]["graphml_path"]
if self.atlas_graphml:
print "loading regions from", self.atlas_graphml
graph = nx.read_graphml(self.atlas_graphml)
for roi_id,roi_data in graph.nodes(data=True):
self.region_labels[roi_id] = roi_data
self.regions = np.array(sorted(map( int,graph.nodes() )))
self.region_pairs_to_index = region_pair_dict_from_roi_list(self.regions)
# Which regionpairs map to which unique id in this dataset?
self.index_to_region_pairs = dict(
[
(idxnum,(self.region_labels[str(id1)]['dn_name'],
self.region_labels[str(id2)]['dn_name']) ) \
for (id1,id2), idxnum in self.region_pairs_to_index.iteritems()
]
)
self.region_pair_strings_to_index = dict([
(value,key) for key,value in self.index_to_region_pairs.iteritems()
])
#self.update_clusters()
def get_region_pair_code(self,region1,region2):
if (region1,region2) in self.region_pair_strings_to_index:
return self.region_pair_strings_to_index[(region1,region2)]
if (region2,region1) in self.region_pair_strings_to_index:
return self.region_pair_strings_to_index[(region2,region1)]
else:
return None
def update_clusters(self):
"""
OVERRIDES THE BASE update_clusters so we can apply the
postproc filter
1) A first-pass "clusterize" is run over all the track_sets
2) The connection-vector matrix is built using all subjects
3) IF a post-processing is selected
3a) Statistics for each connection_id are used to determine
which connection_ids should be visualized
3b) clusterize is run again to collect stats for each dataset
AFTER they've been subset to contain only selected
connection_ids
"""
## First-pass: collect all the termination patterns
_clusters = []
self.label_lookup = []
# If we're not supposed to query tracks, clear the clusters and do nothing
if not self.render_tracks:
self.clusters = _clusters
return
# First-pass clustering
for tds in self.track_sets:
clusts, connection_id_map = self.clusterize(tds)
self.label_lookup.append(connection_id_map)
tds.set_clusters(clusts,
update_glyphs=(self.filter_operation=="None"))
_clusters += tds.clusters # collect the colorized version
self.clusters = _clusters
self.pre_filter_connections, self.pre_filter_matrix = self.connection_vector_matrix()
if self.filter_operation == "None":
self.post_filter_matrix, self.post_filter_connections = \
self.pre_filter_matrix, self.pre_filter_connections
print "%"*5, "No Postprocessing Filter"
return
# A post-processing filter is to be applied
print "%"*5, "Applying", self.filter_operation, "filter"
OK_regions = self.post_processor.filter_clusters(
( self.pre_filter_connections,self.pre_filter_matrix) )
# Second pass: subset the track_sets so that only OK regions
# are plotted
filt_tracks = []
_clusters = []
self.label_lookup = []
for tds in self.track_sets:
ftds = tds.subset(tds.get_tracks_by_connection_id(OK_regions))
clusts, connection_id_map = self.clusterize(ftds)
self.label_lookup.append(connection_id_map)
ftds.set_clusters(clusts)
_clusters += ftds.clusters # collect the colorized version
filt_tracks.append(ftds)
self.clusters = _clusters
self.set_track_sets(filt_tracks)
self.post_filter_connections, self.post_filter_matrix = \
self.connection_vector_matrix()
def clusterize(self, ttracks):
"""
Operates **on a single TrackDataset**.
1) track_dataset.connections are tabulated
2) a RegionCluster is created for each connection_id
Returns the list of RegionClusters and a connection_id -> cluster-index map.
"""
# Holds the cluster assignments for each track
clusters = []
label_id_map = {}
if self.atlas_name == "None":
print "Requires an atlas to be specified!!"
return clusters, label_id_map
labels = ttracks.connections
# Populate the clusters list
clustnum = 0
for k in np.unique(labels):
indices = np.flatnonzero(labels == k)
ntracks = len(indices)
if ntracks < self.min_tracks: continue
try:
start_region, end_region = self.index_to_region_pairs[k]
except KeyError:
print "Region clusterizer encountered an unexpected connection id: ", k
start_region, end_region = "undefined","undefined"
clustnum += 1
clusters.append(
RegionCluster(
start_coordinate = start_region,
end_coordinate = end_region,
ntracks = ntracks,
id_number = clustnum,
indices = indices,
connection_id = k,
scan_id = ttracks.scan_id
)
)
label_id_map[k] = clustnum-1
return clusters, label_id_map
def found_connections(self):
""" Returns a list of connections found by the tracks considered """
found_conns = [set([cl.connection_id for cl in ts.clusters]) for ts in self.track_sets]
connection_ids = []
if len(found_conns) == 0:
print "Found no connections"
return
connection_ids = set([])
for conns in found_conns:
if self.across_subject_comparison_operation == "Union":
connection_ids.update(conns)
elif self.across_subject_comparison_operation == "Intersection":
connection_ids.intersection_update(conns)
return sorted(list(connection_ids))
def connection_vector_matrix(self,n_top=0):
"""
Parameters
----------
n_top:int
only return the `n_top` highest ranked connections
Returns
-------
observed_connections:list
the results of the self.found_connections()
connection_vectors:np.ndarray
one row per dataset, one column per region pair
"""
observed_connections = self.found_connections()
connection_vectors = np.zeros((len(self.track_sets),
len(observed_connections)))
row_labels = []
for tds_num,(tds,lut) in enumerate(zip(self.track_sets,self.label_lookup)):
row_labels.append(tds.properties.scan_id)
for nconn, connection in enumerate(observed_connections):
idx = lut.get(connection,-999)
if idx < 0: continue
connection_vectors[tds_num,nconn] = tds.clusters[idx].ntracks
if n_top > 0:
top_indices = np.flatnonzero(( connection_vectors.shape[1] -1 \
- connection_vectors.argsort().argsort().sum(0).argsort().argsort()) \
< n_top )
return [observed_connections[n] for n in top_indices], connection_vectors[:,top_indices]
return observed_connections, connection_vectors
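# Note on the n_top branch above: the nested argsort calls rank every
# connection within each subject, sum those ranks across subjects, and keep
# the n_top columns with the highest summed rank.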
def plot_connection_vectors(self):
# Which data should we use?
if self.post_filter_connections.size == 0:
matrix = self.pre_filter_matrix
labels = self.pre_filter_connections
else:
matrix = self.post_filter_matrix
labels = self.post_filter_connections
# Pad dimensions
if matrix.ndim == 1:
matrix.shape = (1,matrix.shape[0])
if self.connection_vector_plot_type == "imshow":
self.plot_connection_vector_tiles(matrix, labels)
elif self.connection_vector_plot_type == "lines":
self.plot_connection_vector_lines(matrix, labels)
def plot_connection_vector_lines(self,matrix,labels):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
static_colors = all(
[not s.properties.dynamic_color_clusters for s in self.track_sets])
# Is each TrackDataset assigned a specific color?
if static_colors:
print "plotting using static colors"
for tds, vec in zip(self.track_sets, matrix):
ax.plot( vec, label=tds.properties.scan_id,
color=[x/255. for x in tds.properties.static_color[:3]],
linewidth=4)
else:
for tds, vec in zip(self.track_sets, matrix):
ax.plot(vec,label=tds.properties.scan_id,linewidth=4)
ax.legend()
ax.set_xticks(np.arange(matrix.shape[1]))
ax.set_title("Observed Connections")
ax.set_ylabel("Streamline Count")
ax.set_xticklabels(
["(%s, %s)" % self.index_to_region_pairs.get(conn,("no-label","no-label")) \
for conn in labels ] )
plt.xticks(rotation=90,size=12)
plt.subplots_adjust(bottom=0.55)
fig.show()
return fig
def plot_connection_vector_tiles(self,matrix,labels):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.imshow(cv,interpolation="nearest", aspect="auto")
ax.set_yticks(np.arange(cv.shape[0]))
ax.set_yticklabels([tds.properties.scan_id for tds in self.track_sets])
ax.set_xticks(np.arange(cv.shape[1]))
ax.set_title("Observed Connections")
ax.set_ylabel("Streamline Count")
ax.set_xticklabels(
["(%s, %s)" % self.index_to_region_pairs[conn] \
for conn in labels ] )
plt.xticks(rotation=90,size=12)
plt.subplots_adjust(bottom=0.45)
fig.show()
return fig
def get_R_dat(self,subject_ids,roi_name):
"""
Builds tab-delimited rows (dat format) describing the termination patterns found.
Parameters
----------
subject_ids:list
subject names corresponding to the rows returned by `self.connection_vector_matrix()`
roi_name:str
name of the ROI to be saved in the ROI column of the dat file
Returns
-------
results:list
a string row for each subject
"""
conn_ids, cvec = self.connection_vector_matrix()
region_pairs = ["%s.%s" % self.index_to_region_pairs[conn] \
for conn in conn_ids ]
header = "\t".join(
["subject","roi", "region.pair", "count"])
results = [ header ]
for subject_id,subject_data in zip(subject_ids,cvec):
for pair_id, pair_count in zip(region_pairs,subject_data):
results.append( "\t".join(
['"s%s"'%subject_id, roi_name, pair_id, "%.2f"%pair_count ]
))
return results
# widgets for editing algorithm parameters
algorithm_widgets = Group(
Item(name="min_tracks",
editor=RangeEditor(mode="slider",
high = 100,low = 0,format = "%i")),
Item("atlas_name", editor= EnumEditor(name="possible_atlases")),
Group(
Item("post_processor", style="simple"),
Item(name="b_plot_connection_vector"),
show_border=True,
show_labels=False
)
)
def query_region_pair(self):
"""
Opens a little GUI where you can select two regions.
Each TrackDataset is subset so that only streamlines connecting
that region-pair are visible.
"""
if len(self.track_source) == 0: return
# launch the ui and stop everything else
ui = self.region_pair_query.edit_traits()
if not ui.result:
print "canceled, exiting"
return
self.save_name = "%s__to__%s" % (
self.region_pair_query.region1,
self.region_pair_query.region2)
region_id = self.clusterer.get_region_pair_code(
self.region_pair_query.region1,
self.region_pair_query.region2)
if region_id is None:
print "region pair not found"
else:
self.render_region_pairs(region_id)
def render_region_pairs(self,region_id):
self.clusterer.clear_clusters()
self.clusterer.set_track_sets(
self.track_source.query_connection_id(region_id,
every=self.downsample))
# --- Rendering ---
self.scene3d.disable_render = True
self.clear_track_glyphs()
self.clusterer.update_clusters()
self.clusterer.draw_tracks()
class RegionPair(HasTraits):
possible_regions = List
region1 = Str
region2 = Str
selected_connection = Int
def update_regions(self, clusterer):
"""Updates ``self.possible_regions`` based on the clusterer and
sets an arbitrary region pair
"""
print "updating possible regions"
self.possible_regions = sorted([
clusterer.region_labels[str(id1)]['dn_name'] \
for id1 in clusterer.regions ])
if not len(self.possible_regions): return
self.region1 = self.possible_regions[0]
self.region2 = self.possible_regions[0]
traits_view = View(Group(
Item("region1",editor=EnumEditor(name="possible_regions")),
Item("region2",editor=EnumEditor(name="possible_regions")),
orientation="vertical"
),
kind="modal",
buttons=[OKButton,CancelButton]
)
|
gpl-3.0
|
huzq/scikit-learn
|
sklearn/discriminant_analysis.py
|
3
|
30056
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
import warnings
import numpy as np
from scipy import linalg
from scipy.special import expit
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model._base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .utils.extmath import softmax
from .preprocessing import StandardScaler
from .utils.validation import _deprecate_positional_args
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or float type')
return s
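# For the fixed-shrinkage branch above, shrunk_covariance returns
#     (1 - shrinkage) * S_emp + shrinkage * mu * I,  with mu = trace(S_emp) / n_features,
# so `shrinkage` interpolates between the empirical covariance and a scaled
# identity matrix.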
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like of shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
def _class_cov(X, y, priors, shrinkage=None):
"""Compute weighted within-class covariance matrix.
The per-class covariance are weighted by the class priors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like of shape (n_classes,)
Class priors.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like of shape (n_features, n_features)
Weighted within-class covariance matrix
"""
classes = np.unique(y)
cov = np.zeros(shape=(X.shape[1], X.shape[1]))
for idx, group in enumerate(classes):
Xg = X[y == group, :]
cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage))
return cov
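# Equivalently, the returned matrix is the prior-weighted pooled covariance
#     Sigma_w = sum_k pi_k * Sigma_k
# where Sigma_k is the (possibly shrunk) covariance estimated from class k.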
class LinearDiscriminantAnalysis(LinearClassifierMixin,
TransformerMixin,
BaseEstimator):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions, using the
`transform` method.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : {'svd', 'lsqr', 'eigen'}, default='svd'
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array-like of shape (n_classes,), default=None
The class prior probabilities. By default, the class proportions are
inferred from the training data.
n_components : int, default=None
Number of components (<= min(n_classes - 1, n_features)) for
dimensionality reduction. If None, will be set to
min(n_classes - 1, n_features). This parameter only affects the
`transform` method.
store_covariance : bool, default=False
If True, explicitly compute the weighted within-class covariance
matrix when solver is 'svd'. The matrix is always computed
and stored for the other solvers.
.. versionadded:: 0.17
tol : float, default=1.0e-4
Absolute threshold for a singular value of X to be considered
significant, used to estimate the rank of X. Dimensions whose
singular values are non-significant are discarded. Only used if
solver is 'svd'.
.. versionadded:: 0.17
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : ndarray of shape (n_classes,)
Intercept term.
covariance_ : array-like of shape (n_features, n_features)
Weighted within-class covariance matrix. It corresponds to
`sum_k prior_k * C_k` where `C_k` is the covariance matrix of the
samples in class `k`. The `C_k` are estimated using the (potentially
shrunk) biased estimator of covariance. If solver is 'svd', only
exists when `store_covariance` is True.
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like of shape (n_classes, n_features)
Class-wise means.
priors_ : array-like of shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like of shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
Only available for 'svd' and 'eigen' solvers.
xbar_ : array-like of shape (n_features,)
Overall mean. Only present if solver is 'svd'.
classes_ : array-like of shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
@_deprecate_positional_args
def __init__(self, *, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
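# In closed form, the linear discriminant encoded by coef_ and intercept_ is
#     delta_k(x) = x^T Sigma^{-1} mu_k - 0.5 * mu_k^T Sigma^{-1} mu_k + log(pi_k)
# with Sigma the (optionally shrunk) weighted within-class covariance.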
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
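# The generalized eigenproblem Sb v = lambda Sw v solved above maximizes the
# Rayleigh quotient J(v) = (v^T Sb v) / (v^T Sw v), i.e. between-class over
# within-class scatter along the direction v.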
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within-)scaled data
U, S, Vt = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
# Scaling of within covariance is: V' 1/S
scalings = (Vt[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, Vt = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, Vt.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
"""
X, y = self._validate_data(X, y, ensure_min_samples=2, estimator=self,
dtype=[np.float64, np.float32])
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueError("The number of samples must be more "
"than the number of classes.")
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if not np.isclose(self.priors_.sum(), 1.0):
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(len(self.classes_) - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError(
"n_components cannot be larger than min(n_features, "
"n_classes - 1)."
)
self._max_components = self.n_components
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2,
dtype=X.dtype)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1, dtype=X.dtype)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self)
decision = self.decision_function(X)
if self.classes_.size == 2:
proba = expit(decision)
return np.vstack([1-proba, proba]).T
else:
return softmax(decision)
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
"""
prediction = self.predict_proba(X)
prediction[prediction == 0.0] += np.finfo(prediction.dtype).tiny
return np.log(prediction)
def decision_function(self, X):
"""Apply decision function to an array of samples.
The decision function is equal (up to a constant factor) to the
log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
classification setting this instead corresponds to the difference
`log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
C : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is (n_samples,), giving the
log likelihood ratio of the positive class.
"""
# Only override for the doc
return super().decision_function(X)
class QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : ndarray of shape (n_classes,), default=None
Class priors. By default, the class proportions are inferred from the
training data.
reg_param : float, default=0.0
Regularizes the per-class covariance estimates by transforming S2 as
``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``,
where S2 corresponds to the `scaling_` attribute of a given class.
store_covariance : bool, default=False
If True, the class covariance matrices are explicitly computed and
stored in the `self.covariance_` attribute.
.. versionadded:: 0.17
tol : float, default=1.0e-4
Absolute threshold for a singular value to be considered significant,
used to estimate the rank of `Xk` where `Xk` is the centered matrix
of samples in class k. This parameter does not affect the
predictions. It only controls a warning that is raised when features
are considered to be collinear.
.. versionadded:: 0.17
Attributes
----------
covariance_ : list of len n_classes of ndarray \
of shape (n_features, n_features)
For each class, gives the covariance matrix estimated using the
samples of that class. The estimations are unbiased. Only present if
`store_covariance` is True.
means_ : array-like of shape (n_classes, n_features)
Class-wise means.
priors_ : array-like of shape (n_classes,)
Class priors (sum to 1).
rotations_ : list of len n_classes of ndarray of shape (n_features, n_k)
For each class k an array of shape (n_features, n_k), where
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis. It corresponds to `V`, the matrix of eigenvectors
coming from the SVD of `Xk = U S Vt` where `Xk` is the centered
matrix of samples from class k.
scalings_ : list of len n_classes of ndarray of shape (n_k,)
For each class, contains the scaling of
the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system. It corresponds to `S^2 /
(n_samples - 1)`, where `S` is the diagonal matrix of singular values
from the SVD of `Xk`, where `Xk` is the centered matrix of samples
from class k.
classes_ : ndarray of shape (n_classes,)
Unique class labels.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
QuadraticDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
@_deprecate_positional_args
def __init__(self, *, priors=None, reg_param=0., store_covariance=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariance = store_covariance
self.tol = tol
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values (integers)
"""
X, y = self._validate_data(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('The number of classes has to be greater than'
' one; got %d class' % (n_classes))
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
store_covariance = self.store_covariance
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in range(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
_, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariance or store_covariance:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariance or store_covariance:
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
# return log posterior, see eq (4.12) p. 110 of the ESL.
check_is_fitted(self)
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, axis=1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
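# Per class k this evaluates the log-posterior up to an additive constant,
#     -0.5 * ((x - mu_k)^T Sigma_k^{-1} (x - mu_k) + log det(Sigma_k)) + log(pi_k)
# with Sigma_k^{-1} and log det(Sigma_k) reconstructed from rotations_ and
# scalings_.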
def decision_function(self, X):
"""Apply decision function to an array of samples.
The decision function is equal (up to a constant factor) to the
log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
classification setting this instead corresponds to the difference
`log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
C : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is (n_samples,), giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return log of posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
|
bsd-3-clause
|
xyguo/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
|
bsd-3-clause
|
SAGES-UCSC/GC-CaT-Metallicitiy
|
interp.py
|
1
|
10305
|
#! /usr/bin/env python
'''
Created on Mar 17, 2011
@author: Chris Usher
'''
import numpy as np
#import matplotlib.pyplot as plt
import scipy.interpolate as interpolate
def redisperse(inputwavelengths, inputfluxes, firstWavelength=None, lastWavelength=None, dispersion=None, nPixels=None, outside=None, function='spline'):
inputedges = np.empty(inputwavelengths.size + 1)
inputedges[1:-1] = (inputwavelengths[1:] + inputwavelengths[:-1]) / 2
inputedges[0] = 3 * inputwavelengths[0] / 2 - inputwavelengths[1] / 2
inputedges[-1] = 3 * inputwavelengths[-1] / 2 - inputwavelengths[-2] / 2
inputdispersions = inputedges[1:] - inputedges[:-1]
epsilon = 1e-10
    if dispersion is None and nPixels is not None:
        if firstWavelength is None:
            firstWavelength = inputwavelengths[0]
        if lastWavelength is None:
            lastWavelength = inputwavelengths[-1]
        outputwavelengths = np.linspace(firstWavelength, lastWavelength, nPixels)
    elif dispersion is not None and nPixels is None:
        if firstWavelength is None:
            firstWavelength = inputwavelengths[0]
        if lastWavelength is None:
            lastWavelength = inputwavelengths[-1]
        outputwavelengths = np.arange(firstWavelength, lastWavelength + epsilon, dispersion)
    elif dispersion is not None and nPixels is not None:
        # np.arange (rather than np.ones) builds the intended evenly spaced
        # grid; np.ones would collapse every output pixel onto one wavelength.
        if firstWavelength is not None:
            outputwavelengths = firstWavelength + dispersion * np.arange(nPixels)
        elif lastWavelength is not None:
            outputwavelengths = lastWavelength - dispersion * np.arange(nPixels)
            outputwavelengths = outputwavelengths[::-1]
        else:
            outputwavelengths = inputwavelengths[0] + dispersion * np.arange(nPixels)
    else:
        dispersion = (inputwavelengths[-1] - inputwavelengths[0]) / (inputwavelengths.size - 1)
        if lastWavelength is None:
            lastWavelength = inputwavelengths[-1]
        if firstWavelength is not None:
            outputwavelengths = np.arange(firstWavelength, lastWavelength + epsilon, dispersion)
        else:
            outputwavelengths = np.arange(inputwavelengths[0], lastWavelength + epsilon, dispersion)
outputdispersion = outputwavelengths[1] - outputwavelengths[0]
outputedges = np.linspace(outputwavelengths[0] - outputdispersion / 2, outputwavelengths[-1] + outputdispersion / 2, outputwavelengths.size + 1)
outputfluxes = interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function)
return (outputwavelengths, outputfluxes)
def rebin(inputwavelengths, inputfluxes, outputwavelengths, outside=None, function='spline', ratio=False):
inputedges = np.empty(inputwavelengths.size + 1)
inputedges[1:-1] = (inputwavelengths[1:] + inputwavelengths[:-1]) / 2
inputedges[0] = 3 * inputwavelengths[0] / 2 - inputwavelengths[1] / 2
inputedges[-1] = 3 * inputwavelengths[-1] / 2 - inputwavelengths[-2] / 2
inputdispersions = inputedges[1:] - inputedges[:-1]
outputedges = np.empty(outputwavelengths.size + 1)
outputedges[1:-1] = (outputwavelengths[1:] + outputwavelengths[:-1]) / 2
outputedges[0] = 3 * outputwavelengths[0] / 2 - outputwavelengths[1] / 2
outputedges[-1] = 3 * outputwavelengths[-1] / 2 - outputwavelengths[-2] / 2
return interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function, ratio)
def interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside=None, function='spline', ratio=False):
if not ratio:
fluxdensities = inputfluxes / inputdispersions.mean()
else:
fluxdensities = inputfluxes
outputfluxes = np.ones(outputwavelengths.size)
if outside != None:
outputfluxes = outputfluxes * outside
else:
middle = (outputwavelengths[0] + outputwavelengths[-1]) / 2
firstnew = None
lastnew = None
if function == 'nearest':
pixels = np.arange(0, inputfluxes.size)
for newpixel in range(outputfluxes.size):
if inputedges[0] <= outputwavelengths[newpixel] <= inputedges[-1]:
outputlowerlimit = outputedges[newpixel]
outputupperlimit = outputedges[newpixel + 1]
outputfluxes[newpixel] = 0
below = inputedges[1:] < outputlowerlimit
above = inputedges[:-1] > outputupperlimit
ok = ~(below | above)
for oldpixel in pixels[ok]:
inputlowerlimit = inputedges[oldpixel]
inputupperlimit = inputedges[oldpixel + 1]
if inputlowerlimit >= outputlowerlimit and inputupperlimit <= outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * inputdispersions[oldpixel]
elif inputlowerlimit < outputlowerlimit and inputupperlimit > outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * (outputupperlimit - outputlowerlimit)
elif inputlowerlimit < outputlowerlimit and outputlowerlimit <= inputupperlimit <= outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * (inputupperlimit - outputlowerlimit)
elif outputupperlimit >= inputlowerlimit >= outputlowerlimit and inputupperlimit > outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * (outputupperlimit - inputlowerlimit)
if firstnew == None:
firstnew = outputfluxes[newpixel]
if ratio:
outputfluxes[newpixel] = outputfluxes[newpixel] / (outputupperlimit - outputlowerlimit)
elif outputwavelengths[newpixel] > inputwavelengths[-1] and lastnew == None:
lastnew = outputfluxes[newpixel - 1]
else:
fluxspline = interpolate.UnivariateSpline(inputwavelengths, fluxdensities, s=0, k=3)
for newpixel in range(outputfluxes.size):
if inputedges[0] <= outputwavelengths[newpixel] <= inputedges[-1]:
outputlowerlimit = outputedges[newpixel]
outputupperlimit = outputedges[newpixel + 1]
outputfluxes[newpixel] = fluxspline.integral(outputedges[newpixel], outputedges[newpixel + 1])
if firstnew == None:
firstnew = outputfluxes[newpixel]
if ratio:
outputfluxes[newpixel] = outputfluxes[newpixel] / (outputupperlimit - outputlowerlimit)
elif outputwavelengths[newpixel] > inputwavelengths[-1] and lastnew == None:
lastnew = outputfluxes[newpixel - 1]
if outside == None:
for newpixel in range(outputfluxes.size):
if outputwavelengths[newpixel] < inputwavelengths[0]:
outputfluxes[newpixel] = firstnew
elif outputwavelengths[newpixel] > inputwavelengths[-1]:
outputfluxes[newpixel] = lastnew
return outputfluxes
def lineartolog(inputwavelengths, inputfluxes, outside=0, function='spline', ratio=False, logDispersion=0):
inputedges = np.empty(inputwavelengths.size + 1)
inputedges[1:-1] = (inputwavelengths[1:] + inputwavelengths[:-1]) / 2
inputedges[0] = 3 * inputwavelengths[0] / 2 - inputwavelengths[1] / 2
inputedges[-1] = 3 * inputwavelengths[-1] / 2 - inputwavelengths[-2] / 2
inputdispersions = inputedges[1:] - inputedges[:-1]
if logDispersion:
outputedges = np.arange(np.log10(inputedges[0]), np.log10(inputedges[-1]), logDispersion)
outputwavelengths = (outputedges[:-1] + outputedges[1:]) / 2
outputedges = 10**outputedges
outputwavelengths = 10**outputwavelengths
else:
outputedges = np.logspace(np.log10(inputedges[0]), np.log10(inputedges[-1]), inputedges.size)
outputwavelengths = (outputedges[:-1] * outputedges[1:])**.5
return outputwavelengths, interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function, ratio)
def logtolinear(inputwavelengths, inputfluxes, outside=0, function='spline', ratio=False):
logWavelengths = np.log10(inputwavelengths)
inputedges = np.empty(logWavelengths.size + 1)
inputedges[1:-1] = (logWavelengths[1:] + logWavelengths[:-1]) / 2
inputedges[0] = 3 * logWavelengths[0] / 2 - logWavelengths[1] / 2
inputedges[-1] = 3 * logWavelengths[-1] / 2 - logWavelengths[-2] / 2
inputedges = 10**inputedges
inputdispersions = inputedges[1:] - inputedges[:-1]
outputedges = np.linspace(inputedges[0], inputedges[-1], inputedges.size)
outputwavelengths = (outputedges[:-1] + outputedges[1:]) / 2
return outputwavelengths, interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function, ratio)
def log_redisperse(inputwavelengths, inputfluxes, firstWavelength, lastWavelength, dispersion, outside=0, function='spline', ratio=False):
inputedges = np.empty(inputwavelengths.size + 1)
inputedges[1:-1] = (inputwavelengths[1:] + inputwavelengths[:-1]) / 2
inputedges[0] = 3 * inputwavelengths[0] / 2 - inputwavelengths[1] / 2
inputedges[-1] = 3 * inputwavelengths[-1] / 2 - inputwavelengths[-2] / 2
inputdispersions = inputedges[1:] - inputedges[:-1]
outputwavelengths = np.arange(np.log10(firstWavelength), np.log10(lastWavelength), dispersion)
outputedges = np.linspace(outputwavelengths[0] - dispersion / 2, outputwavelengths[-1] + dispersion / 2, outputwavelengths.size + 1)
outputedges = 10**outputedges
outputwavelengths = 10**outputwavelengths
return outputwavelengths, interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function, ratio)
#plt.show()
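# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). A synthetic spectrum is pushed
# through redisperse() and rebin() defined above; the flux values are
# arbitrary and only the shapes/behaviour of the calls are the point.
if __name__ == '__main__':
    inwave = np.linspace(4000.0, 5000.0, 501)       # 2 A / pixel input grid
    influx = 1.0 + 0.1 * np.sin(inwave / 50.0)
    outwave, outflux = redisperse(inwave, influx, dispersion=5.0)
    rebwave = np.linspace(4000.0, 5000.0, 201)
    rebflux = rebin(inwave, influx, rebwave)
    print('%d input pixels -> %d redispersed, %d rebinned' %
          (inwave.size, outwave.size, rebflux.size))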
|
bsd-3-clause
|
bmazin/ARCONS-pipeline
|
photometry/plot3DImage.py
|
1
|
2500
|
#This code uses Popup to plot a 3D image
import warnings
from functools import partial
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from util.popup import *
def plot3DImage(fig,ax,data,errs=None,fit=None,surface=True,countour=False):
data[np.where(np.invert(data>0.))]=0.
x=np.tile(range(len(data[0])),(len(data),1))
y=np.tile(range(len(data)),(len(data[0]),1)).transpose()
ax = fig.add_subplot(111, projection='3d')
if surface:
ax.plot_surface(x, y, data, rstride=1, cstride=1, color='black',alpha=0.1)
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",'invalid value encountered in divide',RuntimeWarning)
ax.bar3d(x.flatten(),y.flatten(),x.flatten()*0,1,1,data.flatten(),color='cyan',alpha=0.9)
if errs!=None:
ax.bar3d(x[np.where(np.invert(np.isfinite(errs)))].flatten(),y[np.where(np.invert(np.isfinite(errs)))].flatten(),x[np.where(np.invert(np.isfinite(errs)))].flatten()*0,1,1,data[np.where(np.invert(np.isfinite(errs)))].flatten(),color='red',alpha=0.9)
if countour:
#cset = ax.contourf(x, y, data, zdir='z', offset=0, cmap=cm.coolwarm)
cset = ax.contourf(x, y, data, zdir='x', offset=0., cmap=cm.coolwarm)
cset = ax.contourf(x, y, data, zdir='y', offset=0., cmap=cm.coolwarm)
else:
ax.plot(range(len(data[0])+1),np.zeros(len(data[0])+1),zs=np.concatenate(([0],np.amax(data,0))),zdir='z',color='blue',drawstyle='steps')
ax.plot(np.zeros(len(data)+1),range(len(data)+1),zs=np.concatenate(([0],np.amax(data,1))),zdir='z',color='blue',drawstyle='steps')
if fit!=None:
ax.plot_wireframe(x+0.5, y+0.5, fit, rstride=1, cstride=1, color='red')
ax.plot(np.asarray(range(len(data[0])))+0.5,np.zeros(len(data[0])),zs=np.amax(fit,0),zdir='z',color='red')
ax.plot(np.zeros(len(data)),np.asarray(range(len(data)))+0.5,zs=np.amax(fit,1),zdir='z',color='red')
ax.set_zlim(0, np.amax(data))
ax.set_xlim(0,np.amax(x))
ax.set_ylim(0,np.amax(y))
cid = fig.canvas.mpl_connect('scroll_event', partial(scroll3D,fig,ax))
def scroll3D(fig,ax,event):
increment = 0.05
currentZlim = ax.get_zlim()[1]
    if event.button == 'up':
        newZlim = currentZlim-increment*currentZlim
    elif event.button == 'down':
        newZlim = currentZlim+increment*currentZlim
    else:
        return
    if newZlim < 10:
        newZlim=10
ax.set_zlim(0,newZlim)
fig.canvas.draw()
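# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). It assumes the star import from
# util.popup above provides numpy as np, which the functions in this module
# already rely on, and exercises plot3DImage() on a synthetic Gaussian blob.
if __name__ == '__main__':
    import numpy
    import matplotlib.pyplot as plt
    yy, xx = numpy.mgrid[0:20, 0:20]
    blob = 100.0 * numpy.exp(-((xx - 10.0)**2 + (yy - 10.0)**2) / (2.0 * 3.0**2))
    fig = plt.figure()
    plot3DImage(fig, None, blob, surface=True)
    plt.show()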
|
gpl-2.0
|
pygeo/geoval
|
tests/test_grid.py
|
1
|
1263
|
# -*- coding: utf-8 -*-
"""
This file is part of GEOVAL.
(c) 2016- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import matplotlib
matplotlib.use('Agg')
import sys
sys.path.append('..')
import unittest
from geoval import grid
import numpy as np
class TestGeovalGrid(unittest.TestCase):
def setUp(self):
self.lon = np.linspace(-120., 130.)
self.lat = np.linspace(-30., 30.)
self.grid = grid.Grid(np.deg2rad(self.lat), np.deg2rad(self.lon), sphere_radius=6000000.)
def test_DummyTest(self):
pass
def test_grid_init(self):
with self.assertRaises(ValueError):
G = grid.Grid(np.deg2rad(self.lat), np.deg2rad(self.lon))
G = grid.Grid(np.deg2rad(self.lat), np.deg2rad(self.lon), sphere_radius=6000000.)
def test_grid_cellarea(self):
with self.assertRaises(ValueError):
self.grid.calc_cell_area()
def test_grid_plot(self):
self.grid.plot()
def test_grid_plot_voronoi(self):
self.grid.plot_voronoi()
def test_grid_plot_delaunay(self):
self.grid.plot_delaunay_grid()
def test_grid_draw_edge(self):
self.grid.draw_edges()
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
RuthAngus/kalesalad
|
code/identify_dwarfs.py
|
1
|
4436
|
# Find the giants in the TGAS/EPIC crossmatched catalog and remove them from
# the list.
# Saves a .csv file of the tgas_epic.csv catalogue with the giants removed and
# stars of very high and low temperatures removed.
# I have not made a cut in parallax error.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import isochrones
# from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.mist import MIST_Isochrone
from scipy.interpolate import interp1d
plotpar = {'axes.labelsize': 20,
'text.fontsize': 20,
'legend.fontsize': 20,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'text.usetex': True}
plt.rcParams.update(plotpar)
def abs_mag_w_MC_uncertainty(mean_m, m_err, mean_p, parallax_err, N):
values = np.vstack((mean_m + np.random.randn(N)*m_err,
mean_p + np.random.randn(N)*parallax_err)).T
abs_mags = [abs_mag(m, parallax) for m, parallax in values]
mean = np.mean(abs_mags)
lower, upper = np.percentile(abs_mags, 16), np.percentile(abs_mags, 84)
return mean, mean - lower, upper - mean
def abs_mag(m, parallax):
return m - 5 * np.log10(1./parallax) + 5
def CMD_plot(colour, abs_jmag):
"""
Plot and save a CMD.
"""
counts, xbins, ybins = np.histogram2d(colour, abs_jmag, bins=100)
plt.clf()
lower_lim, upper_lim = -.2, 1.5
m = (lower_lim < colour) * (colour < upper_lim)
plt.scatter(colour[m], abs_jmag[m], c=colour[m], s=2)
# plt.contour(xbins[:-1], ybins[:-1], counts, colors='black');
plt.contour(counts.transpose(),
extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()],
linewidths=.5, colors='white', linestyles='solid')
plt.colorbar(label="$J - K$")
plt.xlabel("$J - K$")
plt.ylabel("$M_J$")
plt.ylim(22, 8)
plt.xlim(lower_lim, upper_lim)
plt.subplots_adjust(left=.15, bottom=.15)
plt.savefig("CMD")
def HRD_plot(teff, abs_gmag):
# dar = Dartmouth_Isochrone()
mist = MIST_Isochrone()
iso_300 = mist.isochrone(age=np.log10(300e6), feh=0.0, AV=0.0)
counts, xbins, ybins = np.histogram2d(teff, abs_gmag, bins=100)
# Make temperature cuts
lower_lim, upper_lim = 3200, 8000
m = (lower_lim < teff) * (teff < upper_lim)
plt.clf()
# Plot points + contours
plt.scatter(teff[m], abs_gmag[m], c=teff[m], s=2, cmap="viridis_r",
zorder=0)
plt.colorbar(label="$T_{\mathrm{eff}}$")
plt.contour(counts.transpose(),
extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()],
linewidths=.5, colors='white', linestyles='solid')
# Plot isochrones
plt.plot(iso_300.Teff[:100], iso_300.G_mag[:100]+1, "k", lw=.5)
plt.plot(iso_300.Teff[:100], iso_300.G_mag[:100]-1, "k", lw=.5)
# Select stars between isochrones
fup = interp1d(iso_300.Teff[:100], iso_300.G_mag[:100] + 1)
flo = interp1d(iso_300.Teff[:100], iso_300.G_mag[:100] - 1)
inds = []
for i, G in enumerate(abs_gmag[m]):
upper_G_at_this_teff = fup(teff[m][i])
lower_G_at_this_teff = flo(teff[m][i])
if (lower_G_at_this_teff < G) and (G < upper_G_at_this_teff):
inds.append(i)
# plt.scatter(teff[m][inds], abs_gmag[m][inds], c="k", s=2, zorder=1)
plt.xlabel("$T_{\mathrm{eff}}$")
plt.ylabel("$M_G$")
plt.ylim(10, -7)
plt.xlim(upper_lim, lower_lim)
plt.subplots_adjust(left=.15, bottom=.15)
plt.savefig("HRD")
plt.savefig("HRD.pdf")
return m, inds
if __name__ == "__main__":
# import isochrones.dartmouth
# isochrones.dartmouth.download_grids()
# Plot a CMD.
df = pd.read_csv("epic_tgas.csv")
# Calculate colours and magnitudes
abs_jmag = abs_mag(df.k2_jmag.values, df.tgas_parallax.values*1e-3)
colour = df.k2_jmag.values - df.k2_kmag.values
# Remove NaNs
m = np.isfinite(colour) * np.isfinite(abs_jmag)
colour, abs_jmag = colour[m], abs_jmag[m]
CMD_plot(colour, abs_jmag)
teff = df.k2_teff.values
abs_gmag = abs_mag(df.tgas_phot_g_mean_mag.values,
df.tgas_parallax.values*1e-3)
m = np.isfinite(teff) * np.isfinite(abs_gmag)
teff, abs_gmag = teff[m], abs_gmag[m]
teff_cut, inds = HRD_plot(teff, abs_gmag)
df_temp_cuts = df.iloc[teff_cut]
df_dwarfs = df_temp_cuts.iloc[inds]
df_dwarfs.to_csv("tgas_epic_dwarfs.csv")
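# ---------------------------------------------------------------------------
# Hedged worked example (illustration only). abs_mag() above implements
#   M = m - 5*log10(d / pc) + 5  with  d = 1 / parallax,
# where the parallax is in arcseconds -- hence the *1e-3 applied above to the
# TGAS parallaxes, which are stored in milliarcseconds. For example, a star
# with apparent magnitude m = 10 at parallax 0.01" (100 pc) has
#   M = 10 - 5*log10(100) + 5 = 10 - 10 + 5 = 5,
# i.e. abs_mag(10.0, 0.01) returns 5.0.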
|
mit
|
emon10005/sympy
|
sympy/external/tests/test_importtools.py
|
91
|
1215
|
from sympy.external import import_module
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
|
bsd-3-clause
|
lsbardel/zipline
|
zipline/sources/data_frame_source.py
|
6
|
4633
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
filter : filter to remove the sids
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.columns)
self.start = kwargs.get('start', data.index[0])
self.end = kwargs.get('end', data.index[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iterkv():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
'price': price,
'volume': 1000,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
class DataPanelSource(DataSource):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
filter : filter to remove the sids
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.items)
self.start = kwargs.get('start', data.major_axis[0])
self.end = kwargs.get('end', data.major_axis[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iterkv():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
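# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). It assumes the pandas version this
# module targets (the one providing pd.tseries.index.DatetimeIndex and
# Series.iterkv) and that the zipline DataSource base class needs no extra
# setup beyond __init__.
if __name__ == '__main__':
    index = pd.date_range('2013-01-01', periods=3, freq='D', tz='UTC')
    frame = pd.DataFrame({'AAPL': [100.0, 101.0, 99.5],
                          'MSFT': [27.0, 27.5, 27.2]}, index=index)
    source = DataFrameSource(frame, sids=['AAPL'])
    for event in source.raw_data:
        print(event)   # dicts with dt, sid, price and a dummy volume of 1000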
|
apache-2.0
|
chebee7i/twitter
|
scripts/counter/notes.py
|
1
|
5689
|
"""
Begin date: 2014-04-25 08:00 UTC
End date: 2015-04-25 08:00 UTC
Basic stats and counts for each hashtag are stored in an sqlite3 database:
hashtag_counts.db. The tweets were binned at a temporal resolution of 5
minutes. This means each hashtag has roughly 100k bins associated to it.
There is a single table "hashtags" which was constructed as:
CREATE TABLE IF NOT EXISTS hashtags (
hashtag text unique,
nnz_bins int,
range_bins int,
min_bin int,
max_bin int,
wmean_bin real,
total_counts int,
mean_counts real,
std_counts real,
nonzero_mean_counts real,
nonzero_std_counts real,
counts text
);
Descriptions:
hashtag
The hashtag. Two special hashtags are included: "#world" and "#us".
Note that this is safe since hashtags are not allowed to have "#" mark.
So there is no conflict with the hashtags "world" and "us". Hashtags
are stored in lowercase, but not necessarily in English.
nnz_bins
The number of 5-minute bins that have nonzero counts.
range_bins
The difference between the latest and earliest nonzero bins.
min_bin
The earliest nonzero bin.
max_bin
The latest nonzero bin.
wmean_bin
The weighted mean of nonzero bins: \sum_i w_i b_i / \sum_j w_j.
This weights each bin by the number of counts in that bin.
total_counts
The total number of times the hashtag was mentioned in the data.
mean_counts
The mean count per bin.
std_counts
The standard deviation of counts per bin with ddof=0.
    nonzero_mean_counts
The mean count per nonzero bin.
    nonzero_std_counts
The standard deviation of counts per nonzero bin with ddof=0.
counts
A string representing a JSON object (similar to a Python dictionary)
that maps bin indexes to count values. In Python, you'd run:
import json
c = json.loads(counts)
"""
from __future__ import division
import datetime
import json
import sqlite3
from pytz import utc
import numpy as np
START_DATE = utc.localize(datetime.datetime(2014, 04, 25, 8))
STOP_DATE = utc.localize(datetime.datetime(2015, 04, 25, 8))
n_days = (STOP_DATE - START_DATE).days
resolution = 5 # in minutes
slots = int(n_days * 24 * 60 / resolution)
DELTA = datetime.timedelta(minutes=resolution)
DATES = [START_DATE + i * DELTA for i in range(slots)]
DB = 'hashtag_counts.db'
conn = sqlite3.connect(DB)
def fetch(hashtag, dense=True):
"""
Returns the stored data for a given hashtag.
If dense is True, the counts are returned as a NumPy array instead of
a dictionary.
"""
select = """SELECT * FROM hashtags WHERE hashtag = ?"""
with conn:
stored = conn.execute(select, (hashtag,)).fetchone()
if stored is None:
return None
out = list(stored)
counts = out[-1]
d = json.loads(counts)
indexes = map(int, d.keys())
counts = d.values()
d = dict(zip(indexes, counts))
if dense:
x = np.array([ d.get(idx, 0) for idx in range(slots) ])
out[-1] = x
return tuple(out)
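def example_daily_counts(hashtag='#us'):
    """
    Hedged usage sketch (illustration only, requires hashtag_counts.db).
    fetch() above returns the stored row; with dense=True the JSON count map
    is expanded into a length-`slots` array aligned with DATES, so per-day
    totals can be built exactly as plot_usworld() does below.
    """
    row = fetch(hashtag, dense=True)
    if row is None:
        return None
    counts = row[-1]                     # one entry per 5-minute bin
    bins_per_day = int(24 * 60 / resolution)
    return [chunk.sum() for chunk in np.array_split(counts, slots // bins_per_day)]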
def plot_usworld():
"""
Plots counts as a function of days.
"""
import matplotlib.pyplot as plt
import seaborn
us = fetch('#us', dense=True)[-1]
world = fetch('#world', dense=True)[-1]
days = DATES[::12 * 24]
us_daily = [arr.sum() for arr in np.array_split(us, slots / (12 * 24))]
world_daily = [arr.sum() for arr in np.array_split(world, slots / (12 * 24))]
f, ax = plt.subplots()
ax.plot(days[-200:], us_daily[-200:], label='US')
ax.plot(days[-200:], world_daily[-200:], label='World')
ax.set_title('Geotagged Hashtags')
ax.set_ylabel('Count')
ax.set_xlabel('Time')
f.autofmt_xdate()
plt.legend(loc='best')
plt.savefig('hashtag_counts.pdf')
def fetch_counts(pre, repeater, post, repeats=50):
counts = {}
for k in range(1, repeats):
hashtag = pre + k * repeater + post
data = fetch(hashtag, dense=True)
if data:
print hashtag, data[6]
counts[k] = data[6]
return counts
def plot_repeaters(hashtags, kmax=20):
"""
Plots the counts repeating symbols over different hashtags.
hashtags should be a list of 3-tuples, each representing a family of
hashtags. The 3-tuple should specify the pre, repeater, and post portion.
"""
import matplotlib.pyplot as plt
import seaborn
plt.rcParams['text.usetex'] = False
counts = []
max_k = 0
for hashtag in hashtags:
c = fetch_counts(*hashtag)
mx = max(list(c.keys()))
if mx > max_k:
max_k = mx
counts.append(c)
# Make counts dense
max_k = min(kmax, max_k)
kvals = range(1, max_k + 1)
counts = [ [c.get(k, 0) for k in kvals] for c in counts ]
f, ax = plt.subplots()
for i, hashtag in enumerate(hashtags):
if len(hashtag[1]) > 1:
label = '${}({})^k{}$'.format(*hashtag)
else:
label = '${}{}^k{}$'.format(*hashtag)
ax.plot(kvals, counts[i], marker='o', label=label, alpha=.5, clip_on=True)
ax.set_title("Repeated symbols in hashtags")
ax.set_yscale('log')
ax.set_ylabel('Counts')
ax.set_xlabel('k')
plt.legend(loc='best')
plt.savefig('hashtag_repeats.pdf')
def main():
plot_usworld()
"""
hashtags = [
('ye', 's', ''),
('n', 'o', ''),
('w', 'o', 'w'),
('wo', 'w', ''),
('', 'ha', ''),
('', 'jk', ''),
]
plot_repeaters(hashtags)
"""
if __name__ == '__main__':
main()
|
unlicense
|
lenck/vlb
|
python/bench/Utils.py
|
1
|
13763
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ===========================================================
# File Name: Util.py
# Author: Xu Zhang, Columbia University
# Creation Date: 01-25-2019
# Last Modified: Mon Apr 15 14:51:27 2019
#
# Description: Writing and printing functions
#
# Copyright (C) 2018 Xu Zhang
# All rights reserved.
#
# This file is made available under
# the terms of the BSD license (see the COPYING file).
# ===========================================================
import numpy as np
import os
import csv
from tqdm import tqdm
import cv2
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import plotly.plotly as py
import matplotlib.pyplot as plt
def draw_sequence_result(results, sequence_name, term_to_show,
figure_num=1, result_dir='./python_scores/'):
if len(results) == 0:
return
sequence_index = -1
result = results[0]
for idx, sequence_result in enumerate(result['sequence_result']):
if sequence_name == sequence_result['sequence_name']:
sequence_index = idx
if sequence_index < 0:
print("No {} sequence in the results!".format(sequence_name))
return
link_id_list = result['sequence_result'][sequence_index]['result_link_id_list']
sorted_index = sorted(
range(
len(link_id_list)),
key=link_id_list.__getitem__)
#link_id_list = link_id_list[sorted_index]
link_id_list = [link_id_list[i] for i in sorted_index]
score_list = []
detector_list = []
for result in results:
# print(result['sequence_result'][sequence_index]['sequence_name'])
if result['sequence_result'][sequence_index]['sequence_name'] != sequence_name:
print(
"{} doesn't have the result for sequence {}.".format(
result['detector_name'],
sequence_name))
continue
detector_list.append(result['detector_name'])
cur_score_list = []
for idx, sorted_idx in enumerate(sorted_index):
if result['sequence_result'][sequence_index]['result_link_id_list'][sorted_idx] == link_id_list[idx]:
cur_score_list.append(
result['sequence_result'][sequence_index][term_to_show][sorted_idx])
else:
print(
'Detector {} miss link {} for sequence {}'.format(
result['detector_name'],
link_id_list[idx],
sequence_name))
score_list.append(cur_score_list)
print(score_list)
color = ['r', 'g', 'b', 'k', 'y', 'c']
plt.figure(figure_num) # the first figure
for idx, score in enumerate(score_list):
plt.plot(range(len(score)), score, color[idx % len(color)])
plt.title('{}-{}({})'.format(term_to_show,
results[0]['dataset_name'], sequence_name))
plt.xticks(range(len(score)), link_id_list, rotation=45)
plt.xlabel('label')
plt.ylabel(term_to_show)
plt.legend(detector_list, loc='upper right')
plt.savefig('{}{}/{}/{}_{}_result.png'.format(result_dir,
results[0]['bench_name'], results[0]['dataset_name'], sequence_name, term_to_show))
plt.clf()
def draw_feature(dataset, sequence_name, image_idx, detector, use_cache=True, figure_num=1,
tmp_feature_dir='./features/', result_dir='./python_image/'):
image = dataset.get_image(sequence_name, image_idx)
feature_file_name = '{}{}/{}/{}_{}_frame'.format(tmp_feature_dir,
dataset.name, detector.name, sequence_name, image_idx)
get_feature_flag = False
if use_cache:
try:
feature = np.load(feature_file_name + '.npy')
get_feature_flag = True
except BaseException:
get_feature_flag = False
if not get_feature_flag:
feature = detector.detect_feature(image)
kp_list = [cv2.KeyPoint(p[1], p[0], p[2], p[3]) for p in feature]
draw_image = np.copy(image)
#draw_image = draw_image[...,::-1]
#draw_image = draw_image.copy()
if len(draw_image.shape) == 3:
draw_image = (draw_image[..., ::-1]).copy()
try:
os.makedirs('{}{}/{}/'.format(result_dir, dataset.name, detector.name))
except BaseException:
pass
draw_image = cv2.drawKeypoints(
draw_image,
kp_list,
draw_image,
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
image_file_name = '{}{}/{}/{}_{}_frame.png'.format(result_dir,
dataset.name, detector.name, sequence_name, image_idx)
cv2.imwrite(image_file_name, draw_image)
def print_sequence_result(results, sequence_name, term_to_show):
if len(results) == 0:
return
sequence_index = -1
result = results[0]
for idx, sequence_result in enumerate(result['sequence_result']):
if sequence_name == sequence_result['sequence_name']:
sequence_index = idx
if sequence_index < 0:
print("No {} sequence in the results!".format(sequence_name))
return
print("")
print(
"Dataset: {}, Sequence: {}".format(
results[0]['dataset_name'],
sequence_name))
print("Metric: {}".format(term_to_show))
results_str_list = get_sequence_str_list(
results, sequence_name, term_to_show)
print_table(results_str_list)
def print_result(results, term_to_show):
if len(results) == 0:
return
print("")
print("Dataset: {}".format(results[0]['dataset_name']))
print("Metric: {}".format(term_to_show))
results_str_list = get_str_list(results, term_to_show)
print_table(results_str_list)
def print_retrieval_result(results, term_to_show):
if len(results) == 0:
return
print("")
print("Dataset: {}".format(results[0]['dataset_name']))
print("Metric: {}".format(term_to_show))
results_str_list = get_retrieval_str_list(results, term_to_show)
print_retrieval_table(results_str_list)
def print_retrieval_table(content_list):
if len(content_list) == 0:
return
max_detector_name_len = 8
max_sequence_name_len = 6
for content in content_list:
if len(content[0]) > max_detector_name_len:
max_detector_name_len = len(content[0])
content = content_list[0][1:]
for sequence_name in content:
if len(sequence_name) > max_sequence_name_len:
max_sequence_name_len = len(sequence_name)
content = content_list[0]
title_str = ''
for idx, this_str in enumerate(content):
if idx == 0:
title_str = "|{}|".format(
this_str.ljust(max_detector_name_len)[
:max_detector_name_len])
else:
title_str = title_str + \
"{}|".format(
this_str.ljust(max_sequence_name_len)[
:max_sequence_name_len])
print('-' * len(title_str))
print(title_str)
print('-' * len(title_str))
content_str = ''
for content in content_list[1:]:
for idx, this_str in enumerate(content):
if idx == 0:
content_str = "|{}|".format(
this_str.ljust(max_detector_name_len)[
:max_detector_name_len])
else:
content_str = content_str + \
"{}|".format(
this_str.ljust(max_sequence_name_len)[
:max_sequence_name_len])
print(content_str)
print('-' * len(title_str))
def print_table(content_list):
if len(content_list) == 0:
return
max_detector_name_len = 8
max_sequence_name_len = 6
for content in content_list:
if len(content[0]) > max_detector_name_len:
max_detector_name_len = len(content[0])
content = content_list[0][1:]
for sequence_name in content:
if len(sequence_name) > max_sequence_name_len:
max_sequence_name_len = len(sequence_name)
content = content_list[0]
title_str = ''
for idx, this_str in enumerate(content):
if idx == 0:
title_str = "|{}|".format(
this_str.ljust(max_detector_name_len)[
:max_detector_name_len])
else:
title_str = title_str + \
"{}|".format(
this_str.ljust(max_sequence_name_len)[
:max_sequence_name_len])
print('-' * len(title_str))
print(title_str)
print('-' * len(title_str))
content_str = ''
for content in content_list[1:]:
for idx, this_str in enumerate(content):
if idx == 0:
content_str = "|{}|".format(
this_str.ljust(max_detector_name_len)[
:max_detector_name_len])
else:
content_str = content_str + \
"{}|".format(
this_str.ljust(max_sequence_name_len)[
:max_sequence_name_len])
print(content_str)
print('-' * len(title_str))
def save_result(results, term_to_show, result_dir='./python_scores/'):
result_file_csv = csv.writer(open('{}{}/{}/{}_result.csv'.format(result_dir,
results[0]['bench_name'], results[0]['dataset_name'], term_to_show), 'w'), delimiter=',')
results_str_list = get_str_list(results, term_to_show)
for this_str in results_str_list:
result_file_csv.writerow(this_str)
def save_sequence_result(results, sequence_name,
term_to_show, result_dir='./python_scores/'):
if len(results) == 0:
return
sequence_index = -1
result = results[0]
for idx, sequence_result in enumerate(result['sequence_result']):
if sequence_name == sequence_result['sequence_name']:
sequence_index = idx
if sequence_index < 0:
print("No {} sequence in the results!".format(sequence_name))
return
result_file_csv = csv.writer(open('{}{}/{}/{}_{}_result.csv'.format(result_dir,
results[0]['bench_name'], results[0]['dataset_name'], sequence_name, term_to_show), 'w'), delimiter=',')
results_str_list = get_sequence_str_list(
results, sequence_name, term_to_show)
for this_str in results_str_list:
result_file_csv.writerow(this_str)
def save_retrieval_result(results, term_to_show,
result_dir='./python_scores/'):
result_file_csv = csv.writer(open('{}{}/{}/{}_result.csv'.format(result_dir,
results[0]['bench_name'], results[0]['dataset_name'], term_to_show), 'w'), delimiter=',')
results_str_list = get_retrieval_str_list(results, term_to_show)
for this_str in results_str_list:
result_file_csv.writerow(this_str)
def get_str_list(results, term_to_show):
results_str_list = []
title_str = []
title_str.append('Detector')
result = results[0]
for sequence_result in result['sequence_result']:
title_str.append(sequence_result['sequence_name'])
title_str.append('Ave')
results_str_list.append(title_str)
for result in results:
write_str = []
write_str.append(result['detector_name'])
for sequence_result in result['sequence_result']:
write_str.append(
str(sequence_result['ave_{}'.format(term_to_show)]))
write_str.append(str(result['ave_{}'.format(term_to_show)]))
results_str_list.append(write_str)
return results_str_list
def get_sequence_str_list(results, sequence_name, term_to_show):
sequence_index = -1
result = results[0]
for idx, sequence_result in enumerate(result['sequence_result']):
if sequence_name == sequence_result['sequence_name']:
sequence_index = idx
results_str_list = []
title_str = []
title_str.append('Detector')
link_id_list = sequence_result['result_link_id_list']
sorted_index = sorted(
range(
len(link_id_list)),
key=link_id_list.__getitem__)
link_id_list = [link_id_list[i] for i in sorted_index]
for link_id in link_id_list:
title_str.append(str(link_id))
title_str.append('Ave')
results_str_list.append(title_str)
for result in results:
write_str = []
write_str.append(result['detector_name'])
sequence_result = result['sequence_result'][sequence_index]
link_id_list = sequence_result['result_link_id_list']
sorted_index = sorted(
range(len(link_id_list)),
key=link_id_list.__getitem__)
for sorted_idx in sorted_index:
write_str.append(str(sequence_result[term_to_show][sorted_idx]))
write_str.append(str(sequence_result['ave_{}'.format(term_to_show)]))
results_str_list.append(write_str)
return results_str_list
def get_retrieval_str_list(results, term_to_show):
results_str_list = []
title_str = []
title_str.append('Detector')
result = results[0]
title_str.append(term_to_show)
results_str_list.append(title_str)
for result in results:
write_str = []
write_str.append(result['detector_name'])
write_str.append(str(result[term_to_show]))
results_str_list.append(write_str)
return results_str_list
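# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). print_table() above expects a list
# of rows whose first row is the header ('Detector', sequence names..., 'Ave')
# and whose remaining rows start with a detector name; every cell is a string.
if __name__ == '__main__':
    demo_table = [['Detector', 'seq1', 'seq2', 'Ave'],
                  ['sift', '0.61', '0.58', '0.595'],
                  ['orb', '0.44', '0.47', '0.455']]
    print_table(demo_table)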
|
bsd-2-clause
|
schets/scikit-learn
|
benchmarks/bench_plot_ward.py
|
290
|
1260
|
"""
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
|
bsd-3-clause
|
cpsnowden/ComputationalNeurodynamics
|
Exercise_2/Run2L.py
|
3
|
2347
|
"""
Computational Neurodynamics
Exercise 2
Simulates two layers of Izhikevich neurons. Layer 0 is stimulated
with a constant base current and layer 1 receives synaptic input
from layer 0.
(C) Murray Shanahan et al, 2015
"""
from Connect2L import Connect2L
import numpy as np
import matplotlib.pyplot as plt
N1 = 4
N2 = 4
T = 500 # Simulation time
Ib = 5 # Base current
net = Connect2L(N1, N2)
## Initialise layers
for lr in xrange(len(net.layer)):
net.layer[lr].v = -65 * np.ones(net.layer[lr].N)
net.layer[lr].u = net.layer[lr].b * net.layer[lr].v
net.layer[lr].firings = np.array([])
v1 = np.zeros([T, N1])
v2 = np.zeros([T, N2])
u1 = np.zeros([T, N1])
u2 = np.zeros([T, N2])
## SIMULATE
for t in xrange(T):
# Deliver a constant base current to layer 1
net.layer[0].I = Ib * np.ones(N1)
net.layer[1].I = np.zeros(N2)
net.Update(t)
v1[t] = net.layer[0].v
v2[t] = net.layer[1].v
u1[t] = net.layer[0].u
u2[t] = net.layer[1].u
## Retrieve firings and add Dirac pulses for presentation
firings1 = net.layer[0].firings
firings2 = net.layer[1].firings
if firings1.size != 0:
v1[firings1[:, 0], firings1[:, 1]] = 30
if firings2.size != 0:
v2[firings2[:, 0], firings2[:, 1]] = 30
## Plot membrane potentials
plt.figure(1)
plt.subplot(211)
plt.plot(range(T), v1)
plt.title('Population 1 membrane potentials')
plt.ylabel('Voltage (mV)')
plt.ylim([-90, 40])
plt.subplot(212)
plt.plot(range(T), v2)
plt.title('Population 2 membrane potentials')
plt.ylabel('Voltage (mV)')
plt.ylim([-90, 40])
plt.xlabel('Time (ms)')
## Plot recovery variable
plt.figure(2)
plt.subplot(211)
plt.plot(range(T), u1)
plt.title('Population 1 recovery variables')
plt.ylabel('Voltage (mV)')
plt.subplot(212)
plt.plot(range(T), u2)
plt.title('Population 2 recovery variables')
plt.ylabel('Voltage (mV)')
plt.xlabel('Time (ms)')
## Raster plots of firings
if firings1.size != 0:
plt.figure(3)
plt.subplot(211)
plt.scatter(firings1[:, 0], firings1[:, 1] + 1, marker='.')
plt.xlim(0, T)
plt.ylabel('Neuron number')
plt.ylim(0, N1+1)
plt.title('Population 1 firings')
if firings2.size != 0:
plt.subplot(212)
plt.scatter(firings2[:, 0], firings2[:, 1] + 1, marker='.')
plt.xlim(0, T)
plt.ylabel('Neuron number')
plt.ylim(0, N2+1)
plt.xlabel('Time (ms)')
plt.title('Population 2 firings')
plt.show()
|
gpl-3.0
|
facom/AstrodynTools
|
tides/util.py
|
1
|
3377
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
from constants import *
from numpy import *
from matplotlib.pyplot import *
from sys import exit
###################################################
#CONSTANTS
###################################################
DEG=pi/180
RAD=180/pi
###################################################
#NUMERIC
###################################################
#P2 Legendre Polynomial
def P2(psi):
p=0.5*(3*cos(psi)**2-1)
return p
###################################################
#PHYSICAL
###################################################
#Deformed Radius
def R2(Rp,eps2,theta):
R=Rp*(1+eps2*P2(theta))
return R
#Interior Potential
def Vint(r,theta,R,rho,eps2):
V=-4*pi/3*R**3*rho*Gconst*((3*R**2-r**2)/(2*R**3)+3./5*(r**2/R**3)*eps2*P2(theta))
return V
#Exterior Potential
def Vext(r,theta,R,rho,eps2):
V=-4*pi/3*R**3*rho*Gconst*(1/r+3./5*R**2/r**3*eps2*P2(theta))
return V
#Equilibrium Tide
def Csi(Ms,Mp,Rp,a):
csi=(Ms/Mp)*(Rp/a)**3*Rp
return csi
#Tide potential
def V3(r,theta,Ms,Mp,Rp,a):
g=Gconst*Mp/Rp**2
csi=Csi(Ms,Mp,Rp,a)
V=-csi*g*(r/Rp)**2*P2(theta)
return V
#Contornos
def contourPotential(V,R,range=(-1,1,100),levels='none'):
    #range=(min,max,N); a 3-tuple is required since N is read from range[2],
    #so the default now carries N=100 (an arbitrary sampling choice)
    N=range[2]
X=linspace(range[0]*R,range[1]*R,N)
Y=linspace(range[0]*R,range[1]*R,N)
XM,YM=meshgrid(X,Y)
VM=zeros((N,N))
for i in xrange(N):
x=X[i]
for j in xrange(N):
y=Y[j]
theta=arctan(y/x)
r=sqrt(x**2+y**2)
VM[j,i]=V(r,theta)
    if levels!='none':
        args=dict(levels=levels)
    else:
        args=dict()
contourf(XM/R,YM/R,VM,**args)
###################################################
#TEST CODE
###################################################
if __name__=='__main__':
Ms=Mmoon
Mp=Mearth
Rp=Rearth
rho=rhoearth
a=rmoon
eps2=0.2
#TEST RD
figure(figsize=(6,6))
theta=linspace(0,2*pi,100)
R=R2(Rp,eps2,theta)
x=R*cos(theta)
y=R*sin(theta)
plot(x/Rp,y/Rp,'r-')
xc=Rp*cos(theta)
yc=Rp*sin(theta)
plot(xc/Rp,yc/Rp,'b--')
xlim((-1.5,1.5))
ylim((-1.5,1.5))
savefig("test-Rd.png")
#TEST CSI
csi=Csi(Ms,Mp,Rp,a)
print "Equilibrium tide: %e"%csi
#TEST V3
r=Rp
theta=linspace(0,pi,100)
figure()
plot(theta*RAD,V3(r,theta,Ms,Mp,Rp,a),'k-')
savefig("test-V3.png")
#TEST VINT, VEXT
figure()
R=Rp
for theta in 0,30,60,90:
theta*=DEG
rint=linspace(0,R,100)
rext=linspace(R,2*R,100)
Vi=Vint(rint,theta,Rp,rho,eps2)
Ve=Vext(rext,theta,Rp,rho,eps2)
line=plot(rint/Rp,Vi,'-',label="%s"%(theta*RAD))
color=line[0].get_color()
plot(rext/Rp,Ve,'-',color=color)
axvline(1.0,color='k')
legend(loc='best')
xlabel('$r/R_p$')
ylabel('$V$ (j/kg)')
savefig("test-VintVext.png")
###################################################
#VECTOR
###################################################
def vec2to3(r):
return concatenate((r,[0]))
def dot2(r1,r2):
r1_3=concatenate((r1,[0]))
r2_3=concatenate((r2,[0]))
return dot(r1_3,r2_3)
def cross2(r1,r2):
r1_3=concatenate((r1,[0]))
r2_3=concatenate((r2,[0]))
c=cross(r1_3,r2_3)
return c[1:]
def magvec(r):
return dot(r,r)**0.5
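###################################################
#HEDGED USAGE SKETCH (illustration only)
###################################################
#The 2D helpers above embed vectors in 3D so numpy's dot/cross can be reused.
def _example_vec2():
    a=array([1.0,0.0])
    b=array([0.0,2.0])
    d=dot2(a,b) #0.0, the vectors are orthogonal
    c=cross2(a,b) #array([0.,2.]): y and z of the 3D cross product; z=|a||b|sin(theta)
    m=magvec(b) #2.0
    return d,c,m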
|
gpl-2.0
|
HofmannZ/global-ai-hackathon-coin-truth
|
src/nlp/Sentiment_Analysis.py
|
1
|
3698
|
'''
Author: Giovanni Kastanja
Python: 3.6.0
Performing sentiment analysis on a piece of text
!Important!:
install nltk
from the command line, run 'nltk.download()' to download
- vader, under the models tab
ToDo:
- add learning for the sentiment analysis
ToDo
There isn't data for doing the sentiment analysis yet, so for now we will work with dummy data
'''
# for sentiment intensity classification
from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.tokenize import word_tokenize
from nltk.sentiment.util import *
# for time insights
from profilehooks import coverage, timecall
# for sentiment classification
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
from sklearn import naive_bayes
from sklearn.metrics import roc_auc_score
POSITIVE = list()
NEGATIVE = list()
BUZZWORDS = dict()
class Sentiment(object):
"""docstring for Sentiment"""
version = '0.0.1'
def __init__(self, classifier, data):
        '''
        The input format is still undecided (JSON with metadata vs. an array
        of words). For now the passed `data` is assumed to be a plain string.
        '''
self.text = data
self.sentiment = self.sent_analysis(classifier, data)
self.sentiment_intensity = self.sentiment_intensity_analysis(data)
# then we tokenize the piece of text,
@timecall
def _tokenize_text(self, text):
return word_tokenize(text)
# do some preprocessing on the text
def _preprocessing(self):
# remove punctuation
# remove capital letters
# word_error handling?
pass
# do the sentiment analysis
@timecall
def sent_analysis(self, classifier, text):
'''
This function will classify a piece of text,
based on the classifier that is passed
returns:
- returns the class the text will be classified in according to the trained
predictor
'''
text_array = np.array([text])
text_vector = vectorizer.transform(text_array)
return classifier.predict(text_vector)
@timecall
def sentiment_intensity_analysis(self, text):
'''
Calculates the intensity of the text passed as argument
parameters:
text (String), a string of the text we want to analyze
returns:
sentiment_intensity (dict), a dict contaning the different intensity-scores of the text
'''
sid = SentimentIntensityAnalyzer()
sentiment_intensity = sid.polarity_scores(text)
return sentiment_intensity
def __repr__(self):
return """
Sentiment_obj: text:{} sentiment:{} sentiment intensity:{}
""".format(self.text, self.sentiment, self.sentiment_intensity)
# return the dominant sentiment of the piece of text
# train the sentiment classifier
def train_sentiment_classifier(trainingtext):
'''
trains a naive bayes classifier to train on sentiment.
parameters:
- trainingtext(.csv/.txt), needs to be annotated
'''
df = pd.read_csv('training.txt', sep='\t', names=['liked', 'txt'])
# vectorize words
stopset = set(stopwords.words('english'))
vectorizer = TfidfVectorizer(use_idf=True, lowercase=True, strip_accents='ascii', stop_words=stopset)
# target
y = df.liked
# samples
X = vectorizer.fit_transform(df.txt)
# split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# train the naive bayes classifier
clf = naive_bayes.MultinomialNB()
clf.fit(X_train, y_train)
return clf
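# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). Note a caveat in the class above:
# Sentiment.sent_analysis() refers to a module-level `vectorizer`, while
# train_sentiment_classifier() builds its TfidfVectorizer locally, so the
# classifier path will not run until that vectorizer is exposed (e.g. returned
# alongside the classifier). The intensity path below works on its own,
# assuming the VADER lexicon has been fetched via nltk.download().
if __name__ == '__main__':
    sid = SentimentIntensityAnalyzer()
    print(sid.polarity_scores("the coin rallied and everyone is thrilled"))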
|
mit
|
dshen1/trading-with-python
|
lib/functions.py
|
76
|
11627
|
# -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # in descending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdwon values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df)
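    # Hedged continuation of the demo (illustration only): sharpe() expects a
    # daily pnl series and drawdown() a cumulative curve, so a synthetic
    # random-walk equity curve is enough to show the shapes of their outputs.
    rnd = np.random.RandomState(0)
    equity = pd.Series(rnd.normal(0.05, 1.0, 250)).cumsum()
    dd, ddur = drawdown(equity)
    print('sharpe: %.2f  max drawdown: %.2f  longest drawdown: %d bars' %
          (sharpe(equity.diff().fillna(0)), dd.max(), int(ddur.max())))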
|
bsd-3-clause
|
unsiloai/syntaxnet-ops-hack
|
tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
|
27
|
1592
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.estimator.inputs.queues.feeding_functions import _ArrayFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data
from tensorflow.python.estimator.inputs.queues.feeding_functions import _GeneratorFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _OrderedDictNumpyFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _PandasFeedFn
# pylint: enable=unused-import
from tensorflow.python.util.deprecation import deprecated
@deprecated('2017-06-15', 'Moved to tf.contrib.training.enqueue_data.')
def enqueue_data(*args, **kwargs):
return _enqueue_data(*args, **kwargs)
|
apache-2.0
|
SamStudio8/scikit-bio
|
skbio/draw/_distributions.py
|
10
|
30987
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import map, range, zip
from itertools import cycle
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon, Rectangle
import six
from skbio.util._decorator import deprecated
distribution_plot_deprecation_p = {
'as_of': '0.4.0', 'until': '0.4.1', 'reason': (
"Plots that are not specific to bioinformatics should be generated "
"with seaborn or another general-purpose plotting package."
)}
@deprecated(**distribution_plot_deprecation_p)
def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
x_label=None, y_label=None, x_tick_labels_orientation='vertical',
y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
box_colors=None, figure_width=None, figure_height=None,
legend=None):
"""Generate a figure with a boxplot for each distribution.
Parameters
----------
    distributions : 2-D array_like
Distributions to plot. A boxplot will be created for each distribution.
x_values : list of numbers, optional
List indicating where each boxplot should be placed. Must be the same
length as `distributions` if provided.
x_tick_labels : list of str, optional
List of x-axis tick labels.
title : str, optional
Title of the plot.
x_label : str, optional
x-axis label.
y_label : str, optional
y-axis label.
x_tick_labels_orientation : {'vertical', 'horizontal'}
Orientation of the x-axis labels.
y_min : scalar, optional
Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
y_max : scalar, optional
Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
whisker_length : scalar, optional
Length of the whiskers as a function of the IQR. For example, if 1.5,
the whiskers extend to ``1.5 * IQR``. Anything outside of that range is
treated as an outlier.
box_width : scalar, optional
Width of each box in plot units.
box_colors : str, tuple, or list of colors, optional
Either a matplotlib-compatible string or tuple that indicates the color
to be used for every boxplot, or a list of colors to color each boxplot
individually. If ``None``, boxes will be the same color as the plot
background. If a list of colors is provided, a color must be provided
for each boxplot. Can also supply ``None`` instead of a color, which
will color the box the same color as the plot background.
figure_width : scalar, optional
Width of the plot figure in inches. If not provided, will default to
matplotlib's default figure width.
figure_height : scalar, optional
Height of the plot figure in inches. If not provided, will default to
matplotlib's default figure height.
legend : tuple or list, optional
Two-element tuple or list that contains a list of valid matplotlib
colors as the first element and a list of labels (strings) as the
second element. The lengths of the first and second elements must be
the same. If ``None``, a legend will not be plotted.
Returns
-------
matplotlib.figure.Figure
Figure containing a boxplot for each distribution.
See Also
--------
matplotlib.pyplot.boxplot
scipy.stats.ttest_ind
Notes
-----
This is a convenience wrapper around matplotlib's ``boxplot`` function that
allows for coloring of boxplots and legend generation.
Examples
--------
Create a plot with two boxplots:
.. plot::
>>> from skbio.draw import boxplots
>>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
Plot three distributions with custom colors and labels:
.. plot::
>>> from skbio.draw import boxplots
>>> fig = boxplots(
... [[2, 2, 1, 3], [0, -1, 0, 0.1, 0.3], [4, 5, 6, 3]],
... x_tick_labels=('Control', 'Treatment 1', 'Treatment 2'),
... box_colors=('green', 'blue', 'red'))
"""
distributions = _validate_distributions(distributions)
num_dists = len(distributions)
_validate_x_values(x_values, x_tick_labels, num_dists)
# Create a new figure to plot our data on, and then plot the distributions.
fig, ax = plt.subplots()
box_plot = plt.boxplot(distributions, positions=x_values,
whis=whisker_length, widths=box_width)
if box_colors is not None:
if _is_single_matplotlib_color(box_colors):
box_colors = [box_colors] * num_dists
_color_box_plot(ax, box_plot, box_colors)
# Set up the various plotting options, such as x- and y-axis labels, plot
# title, and x-axis values if they have been supplied.
_set_axes_options(ax, title, x_label, y_label,
x_tick_labels=x_tick_labels,
x_tick_labels_orientation=x_tick_labels_orientation,
y_min=y_min, y_max=y_max)
if legend is not None:
if len(legend) != 2:
raise ValueError("Invalid legend was provided. The legend must be "
"a two-element tuple/list where the first "
"element is a list of colors and the second "
"element is a list of labels.")
_create_legend(ax, legend[0], legend[1], 'colors')
_set_figure_size(fig, figure_width, figure_height)
return fig
@deprecated(**distribution_plot_deprecation_p)
def grouped_distributions(plot_type, data, x_values=None,
data_point_labels=None, distribution_labels=None,
distribution_markers=None, x_label=None,
y_label=None, title=None,
x_tick_labels_orientation='vertical', y_min=None,
y_max=None, whisker_length=1.5,
error_bar_type='stdv', distribution_width=None,
figure_width=None, figure_height=None):
"""Generate a figure with distributions grouped at points along the x-axis.
Parameters
----------
plot_type : {'bar', 'scatter', 'box'}
Type of plot to visualize distributions with.
data : list of lists of lists
Each inner list represents a data point along the x-axis. Each data
point contains lists of data for each distribution in the group at that
point. This nesting allows for the grouping of distributions at each
data point.
x_values : list of scalars, optional
Spacing of data points along the x-axis. Must be the same length as the
number of data points and be in ascending sorted order. If not
provided, plots will be spaced evenly.
data_point_labels : list of str, optional
Labels for data points.
distribution_labels : list of str, optional
Labels for each distribution in a data point grouping.
distribution_markers : list of str or list of tuple, optional
Matplotlib-compatible strings or tuples that indicate the color or
symbol to be used to distinguish each distribution in a data point
grouping. Colors will be used for bar charts or box plots, while
symbols will be used for scatter plots.
x_label : str, optional
x-axis label.
y_label : str, optional
y-axis label.
title : str, optional
Plot title.
x_tick_labels_orientation : {'vertical', 'horizontal'}
Orientation of x-axis labels.
y_min : scalar, optional
Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
y_max : scalar, optional
Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
whisker_length : scalar, optional
If `plot_type` is ``'box'``, determines the length of the whiskers as a
function of the IQR. For example, if 1.5, the whiskers extend to
``1.5 * IQR``. Anything outside of that range is seen as an outlier.
If `plot_type` is not ``'box'``, this parameter is ignored.
error_bar_type : {'stdv', 'sem'}
Type of error bars to use if `plot_type` is ``'bar'``. Can be either
``'stdv'`` (for standard deviation) or ``'sem'`` for the standard error
of the mean. If `plot_type` is not ``'bar'``, this parameter is
ignored.
distribution_width : scalar, optional
Width in plot units of each individual distribution (e.g. each bar if
the plot type is a bar chart, or the width of each box if the plot type
is a boxplot). If None, will be automatically determined.
figure_width : scalar, optional
Width of the plot figure in inches. If not provided, will default to
matplotlib's default figure width.
figure_height : scalar, optional
Height of the plot figure in inches. If not provided, will default to
matplotlib's default figure height.
Returns
-------
matplotlib.figure.Figure
Figure containing distributions grouped at points along the x-axis.
Examples
--------
Create a plot with two distributions grouped at three points:
.. plot::
>>> from skbio.draw import grouped_distributions
>>> fig = grouped_distributions('bar',
... [[[2, 2, 1,], [0, 1, 4]],
... [[1, 1, 1], [4, 4.5]],
... [[2.2, 2.4, 2.7, 1.0], [0, 0.2]]],
... distribution_labels=['Treatment 1',
... 'Treatment 2'])
"""
# Set up different behavior based on the plot type.
if plot_type == 'bar':
plotting_function = _plot_bar_data
distribution_centered = False
marker_type = 'colors'
elif plot_type == 'scatter':
plotting_function = _plot_scatter_data
distribution_centered = True
marker_type = 'symbols'
elif plot_type == 'box':
plotting_function = _plot_box_data
distribution_centered = True
marker_type = 'colors'
else:
raise ValueError("Invalid plot type '%s'. Supported plot types are "
"'bar', 'scatter', or 'box'." % plot_type)
num_points, num_distributions = _validate_input(data, x_values,
data_point_labels,
distribution_labels)
# Create a list of matplotlib markers (colors or symbols) that can be used
# to distinguish each of the distributions. If the user provided a list of
# markers, use it and loop around to the beginning if there aren't enough
# markers. If they didn't provide a list, or it was empty, use our own
# predefined list of markers (again, loop around to the beginning if we
# need more markers).
distribution_markers = _get_distribution_markers(marker_type,
distribution_markers,
num_distributions)
# Now calculate where each of the data points will start on the x-axis.
x_locations = _calc_data_point_locations(num_points, x_values)
assert (len(x_locations) == num_points), "The number of x_locations " +\
"does not match the number of data points."
if distribution_width is None:
# Find the smallest gap between consecutive data points and divide this
# by the number of distributions + 1 for some extra spacing between
# data points.
min_gap = max(x_locations)
for i in range(len(x_locations) - 1):
curr_gap = x_locations[i + 1] - x_locations[i]
if curr_gap < min_gap:
min_gap = curr_gap
distribution_width = min_gap / float(num_distributions + 1)
else:
if distribution_width <= 0:
raise ValueError("The width of a distribution cannot be less than "
"or equal to zero.")
result, plot_axes = plt.subplots()
# Iterate over each data point, and plot each of the distributions at that
# data point. Increase the offset after each distribution is plotted,
# so that the grouped distributions don't overlap.
for point, x_pos in zip(data, x_locations):
dist_offset = 0
for dist_index, dist, dist_marker in zip(range(num_distributions),
point, distribution_markers):
dist_location = x_pos + dist_offset
plotting_function(plot_axes, dist, dist_marker, distribution_width,
dist_location, whisker_length, error_bar_type)
dist_offset += distribution_width
# Set up various plot options that are best set after the plotting is done.
# The x-axis tick marks (one per data point) are centered on each group of
# distributions.
plot_axes.set_xticks(_calc_data_point_ticks(x_locations,
num_distributions,
distribution_width,
distribution_centered))
_set_axes_options(plot_axes, title, x_label, y_label, x_values,
data_point_labels, x_tick_labels_orientation, y_min,
y_max)
if distribution_labels is not None:
_create_legend(plot_axes, distribution_markers, distribution_labels,
marker_type)
_set_figure_size(result, figure_width, figure_height)
# matplotlib seems to sometimes plot points on the rightmost edge of the
# plot without adding padding, so we need to add our own to both sides of
# the plot. For some reason this has to go after the call to draw(),
# otherwise matplotlib throws an exception saying it doesn't have a
# renderer. Boxplots need extra padding on the left.
if plot_type == 'box':
left_pad = 2 * distribution_width
else:
left_pad = distribution_width
plot_axes.set_xlim(plot_axes.get_xlim()[0] - left_pad,
plot_axes.get_xlim()[1] + distribution_width)
return result
def _validate_distributions(distributions):
dists = []
for distribution in distributions:
try:
distribution = np.asarray(distribution, dtype=float)
except ValueError:
raise ValueError("Each value in each distribution must be "
"convertible to a number.")
# Empty distributions are plottable in mpl < 1.4.0. In 1.4.0, a
# ValueError is raised. This has been fixed in mpl 1.4.0-dev (see
# https://github.com/matplotlib/matplotlib/pull/3571). In order to
# support empty distributions across mpl versions, we replace them with
# [np.nan]. See https://github.com/pydata/pandas/issues/8382,
# https://github.com/matplotlib/matplotlib/pull/3571, and
# https://github.com/pydata/pandas/pull/8240 for details.
# If we decide to only support mpl > 1.4.0 in the future, this code can
# likely be removed in favor of letting mpl handle empty distributions.
if distribution.size > 0:
dists.append(distribution)
else:
dists.append(np.array([np.nan]))
return dists
def _validate_input(data, x_values, data_point_labels, distribution_labels):
"""Returns a tuple containing the number of data points and distributions
in the data.
Validates plotting options to make sure they are valid with the supplied
data.
"""
if data is None or not data or isinstance(data, six.string_types):
raise ValueError("The data must be a list type, and it cannot be "
"None or empty.")
num_points = len(data)
num_distributions = len(data[0])
empty_data_error_msg = ("The data must contain at least one data "
"point, and each data point must contain at "
"least one distribution to plot.")
if num_points == 0 or num_distributions == 0:
raise ValueError(empty_data_error_msg)
for point in data:
if len(point) == 0:
raise ValueError(empty_data_error_msg)
if len(point) != num_distributions:
raise ValueError("The number of distributions in each data point "
"grouping must be the same for all data points.")
# Make sure we have the right number of x values (one for each data point),
# and make sure they are numbers.
_validate_x_values(x_values, data_point_labels, num_points)
if (distribution_labels is not None and
len(distribution_labels) != num_distributions):
raise ValueError("The number of distribution labels must be equal "
"to the number of distributions.")
return num_points, num_distributions
def _validate_x_values(x_values, x_tick_labels, num_expected_values):
"""Validates the x values provided by the user, making sure they are the
correct length and are all numbers.
Also validates the number of x-axis tick labels.
Raises a ValueError if these conditions are not met.
"""
if x_values is not None:
if len(x_values) != num_expected_values:
raise ValueError("The number of x values must match the number "
"of data points.")
try:
list(map(float, x_values))
        except (TypeError, ValueError):
raise ValueError("Each x value must be a number.")
if x_tick_labels is not None:
if len(x_tick_labels) != num_expected_values:
raise ValueError("The number of x-axis tick labels must match the "
"number of data points.")
def _get_distribution_markers(marker_type, marker_choices, num_markers):
"""Returns a list of length num_markers of valid matplotlib colors or
symbols.
The markers will be comprised of those found in marker_choices (if not None
and not empty) or a list of predefined markers (determined by marker_type,
which can be either 'colors' or 'symbols'). If there are not enough
markers, the list of markers will be reused from the beginning again (as
many times as are necessary).
"""
if num_markers < 0:
raise ValueError("num_markers must be greater than or equal to zero.")
if marker_choices is None or len(marker_choices) == 0:
if marker_type == 'colors':
marker_choices = ['b', 'g', 'r', 'c', 'm', 'y', 'w']
elif marker_type == 'symbols':
marker_choices = \
['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
if len(marker_choices) < num_markers:
# We don't have enough markers to represent each distribution uniquely,
# so let the user know. We'll add as many markers (starting from the
# beginning of the list again) until we have enough, but the user
# should still know because they may want to provide a new list of
# markers.
warnings.warn(
"There are not enough markers to uniquely represent each "
"distribution in your dataset. You may want to provide a list "
"of markers that is at least as large as the number of "
"distributions in your dataset.",
RuntimeWarning)
marker_cycle = cycle(marker_choices[:])
while len(marker_choices) < num_markers:
marker_choices.append(next(marker_cycle))
return marker_choices[:num_markers]
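# A minimal sketch (not part of the original module) showing the default marker
# choices and the cycling behaviour when too few markers are supplied:
def _example_distribution_markers():  # pragma: no cover
    """Default colors, then cycling of a short user-supplied symbol list."""
    default_colors = _get_distribution_markers('colors', None, 3)
    # -> ['b', 'g', 'r']
    cycled_symbols = _get_distribution_markers('symbols', ['^', '<'], 3)
    # -> ['^', '<', '^'] (and a RuntimeWarning about reused markers)
    return default_colors, cycled_symbols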
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs
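# A minimal sketch (not part of the original module) of the rescaling described
# above; the sample x_values are illustrative assumptions:
def _example_data_point_locations():  # pragma: no cover
    """Evenly spaced locations vs. locations rescaled into [1, num_points]."""
    evenly_spaced = _calc_data_point_locations(3)
    # -> array([1, 2, 3])
    rescaled = _calc_data_point_locations(3, x_values=[10, 20, 40])
    # -> array([1., 1.667, 3.]) (approximately)
    return evenly_spaced, rescaled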
def _calc_data_point_ticks(x_locations, num_distributions, distribution_width,
distribution_centered):
"""Returns a 1D numpy array of x-axis tick positions.
These positions will be centered on each data point.
Set distribution_centered to True for scatter and box plots because their
plot types naturally center over a given horizontal position. Bar charts
should use distribution_centered = False because the leftmost edge of a bar
starts at a given horizontal position and extends to the right for the
width of the bar.
"""
dist_size = num_distributions - 1 if distribution_centered else\
num_distributions
return x_locations + ((dist_size * distribution_width) / 2)
def _plot_bar_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single bar in matplotlib."""
result = None
# We do not want to plot empty distributions because matplotlib will not be
# able to render them as PDFs.
if len(distribution) > 0:
avg = np.mean(distribution)
if error_bar_type == 'stdv':
error_bar = np.std(distribution)
elif error_bar_type == 'sem':
error_bar = np.std(distribution) / np.sqrt(len(distribution))
else:
raise ValueError(
"Invalid error bar type '%s'. Supported error bar types are "
"'stdv' and 'sem'." % error_bar_type)
result = plot_axes.bar(x_position, avg, distribution_width,
yerr=error_bar, ecolor='black',
facecolor=distribution_color)
return result
def _plot_scatter_data(plot_axes, distribution, distribution_symbol,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single scatterplot in matplotlib."""
result = None
x_vals = [x_position] * len(distribution)
# matplotlib's scatter function doesn't like plotting empty data.
if len(x_vals) > 0 and len(distribution) > 0:
result = plot_axes.scatter(x_vals, distribution,
marker=distribution_symbol, c='k')
return result
def _plot_box_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single boxplot in matplotlib."""
result = None
if len(distribution) > 0:
result = plot_axes.boxplot([distribution], positions=[x_position],
widths=distribution_width,
whis=whisker_length)
_color_box_plot(plot_axes, result, [distribution_color])
return result
def _is_single_matplotlib_color(color):
"""Returns True if color is a single (not a list) mpl color."""
single_color = False
if (isinstance(color, six.string_types)):
single_color = True
elif len(color) == 3 or len(color) == 4:
single_color = True
for e in color:
if not (isinstance(e, float) or isinstance(e, int)):
single_color = False
return single_color
def _color_box_plot(plot_axes, box_plot, colors):
"""Color boxes in the box plot with the specified colors.
If any of the colors are None, the box will not be colored.
The box_plot argument must be the dictionary returned by the call to
matplotlib's boxplot function, and the colors argument must consist of
valid matplotlib colors.
"""
# Note: the following code is largely taken from this matplotlib boxplot
# example:
# http://matplotlib.sourceforge.net/examples/pylab_examples/
# boxplot_demo2.html
num_colors = len(colors)
num_box_plots = len(box_plot['boxes'])
if num_colors != num_box_plots:
raise ValueError("The number of colors (%d) does not match the number "
"of boxplots (%d)." % (num_colors, num_box_plots))
for box, median, color in zip(box_plot['boxes'],
box_plot['medians'],
colors):
if color is not None:
box_x = []
box_y = []
# There are five points in the box. The first is the same as
# the last.
for i in range(5):
box_x.append(box.get_xdata()[i])
box_y.append(box.get_ydata()[i])
box_coords = list(zip(box_x, box_y))
box_polygon = Polygon(box_coords, facecolor=color)
plot_axes.add_patch(box_polygon)
# Draw the median lines back over what we just filled in with
# color.
median_x = []
median_y = []
for i in range(2):
median_x.append(median.get_xdata()[i])
median_y.append(median.get_ydata()[i])
plot_axes.plot(median_x, median_y, 'black')
def _set_axes_options(plot_axes, title=None, x_label=None, y_label=None,
x_values=None, x_tick_labels=None,
x_tick_labels_orientation='vertical', y_min=None,
y_max=None):
"""Applies various labelling options to the plot axes."""
if title is not None:
plot_axes.set_title(title)
if x_label is not None:
plot_axes.set_xlabel(x_label)
if y_label is not None:
plot_axes.set_ylabel(y_label)
if (x_tick_labels_orientation != 'vertical' and
x_tick_labels_orientation != 'horizontal'):
raise ValueError("Invalid orientation for x-axis tick labels: '%s'. "
"Valid orientations are 'vertical' or 'horizontal'."
% x_tick_labels_orientation)
# If labels are provided, always use them. If they aren't, use the x_values
# that denote the spacing between data points as labels. If that isn't
# available, simply label the data points in an incremental fashion,
# i.e. 1, 2, 3, ..., n, where n is the number of data points on the plot.
if x_tick_labels is not None:
plot_axes.set_xticklabels(x_tick_labels,
rotation=x_tick_labels_orientation)
elif x_tick_labels is None and x_values is not None:
plot_axes.set_xticklabels(x_values, rotation=x_tick_labels_orientation)
else:
plot_axes.set_xticklabels(
range(1, len(plot_axes.get_xticklabels()) + 1),
rotation=x_tick_labels_orientation)
# Set the y-axis range if specified.
if y_min is not None:
plot_axes.set_ylim(bottom=float(y_min))
if y_max is not None:
plot_axes.set_ylim(top=float(y_max))
def _create_legend(plot_axes, distribution_markers, distribution_labels,
marker_type):
"""Creates a legend on the supplied axes."""
# We have to use a proxy artist for the legend because box plots currently
# don't have a very useful legend in matplotlib, and using the default
# legend for bar/scatterplots chokes on empty/null distributions.
#
# Note: This code is based on the following examples:
# http://matplotlib.sourceforge.net/users/legend_guide.html
# http://stackoverflow.com/a/11423554
if len(distribution_markers) != len(distribution_labels):
raise ValueError("The number of distribution markers does not match "
"the number of distribution labels.")
if marker_type == 'colors':
legend_proxy = [Rectangle((0, 0), 1, 1, fc=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, loc='best')
elif marker_type == 'symbols':
legend_proxy = [Line2D(range(1), range(1), color='white',
markerfacecolor='black', marker=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, numpoints=3,
scatterpoints=3, loc='best')
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
def _set_figure_size(fig, width=None, height=None):
"""Sets the plot figure size and makes room for axis labels, titles, etc.
If both width and height are not provided, will use matplotlib defaults.
Making room for labels will not always work, and if it fails, the user will
be warned that their plot may have cut-off labels.
"""
# Set the size of the plot figure, then make room for the labels so they
# don't get cut off. Must be done in this order.
if width is not None and height is not None and width > 0 and height > 0:
fig.set_size_inches(width, height)
try:
fig.tight_layout()
except ValueError:
warnings.warn(
"Could not automatically resize plot to make room for "
"axes labels and plot title. This can happen if the labels or "
"title are extremely long and the plot size is too small. Your "
"plot may have its labels and/or title cut-off. To fix this, "
"try increasing the plot's size (in inches) and try again.",
RuntimeWarning)
|
bsd-3-clause
|
davidgbe/scikit-learn
|
sklearn/decomposition/tests/test_dict_learning.py
|
69
|
8605
|
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
|
bsd-3-clause
|
bzero/statsmodels
|
statsmodels/graphics/mosaicplot.py
|
20
|
26989
|
"""Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
See the docstring of the mosaic function for more information.
"""
# Author: Enrico Giampieri - 21 Jan 2013
from __future__ import division
from statsmodels.compat.python import (iteritems, iterkeys, lrange, string_types, lzip,
itervalues, zip, range)
import numpy as np
from statsmodels.compat.collections import OrderedDict
from itertools import product
from numpy import iterable, r_, cumsum, array
from statsmodels.graphics import utils
from pandas import DataFrame
__all__ = ["mosaic"]
def _normalize_split(proportion):
"""
    return the normalized cumulative break points of the available space
    given the division; if only a number p is given, a split in two pieces
    of size p and 1 - p is assumed
"""
if not iterable(proportion):
if proportion == 0:
proportion = array([0.0, 1.0])
elif proportion >= 1:
proportion = array([1.0, 0.0])
elif proportion < 0:
            raise ValueError("proportions should be positive, "
"given value: {}".format(proportion))
else:
proportion = array([proportion, 1.0 - proportion])
proportion = np.asarray(proportion, dtype=float)
if np.any(proportion < 0):
        raise ValueError("proportions should be positive, "
"given value: {}".format(proportion))
if np.allclose(proportion, 0):
        raise ValueError("at least one proportion should be "
                         "greater than zero, given value: {}".format(proportion))
# ok, data are meaningful, so go on
if len(proportion) < 2:
return array([0.0, 1.0])
left = r_[0, cumsum(proportion)]
left /= left[-1] * 1.0
return left
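# A minimal sketch (not part of the original module) of the break points the
# helper above produces; the input proportions are illustrative assumptions:
def _example_normalize_split():  # pragma: no cover
    """A scalar splits the unit interval in two; a list gives weighted breaks."""
    halves = _normalize_split(0.5)
    # -> array([0. , 0.5, 1. ])
    weighted = _normalize_split([1, 1, 2])
    # -> array([0.  , 0.25, 0.5 , 1.  ])
    return halves, weighted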
def _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):
"""
    Split the given rectangle into n segments whose proportions are specified
    along the given axis. If a gap is inserted, the segments will be separated
    by a certain amount of space, retaining the relative proportions between
    them; a gap of 1 corresponds to a plot that is half void, with the
    remaining half of the space proportionally divided among the pieces.
"""
x, y, w, h = float(x), float(y), float(width), float(height)
if (w < 0) or (h < 0):
        raise ValueError("dimension of the square less than "
                         "zero: w={} h={}".format(w, h))
proportions = _normalize_split(proportion)
# extract the starting point and the dimension of each subdivision
    # with respect to the unit square
starting = proportions[:-1]
amplitude = proportions[1:] - starting
    # how much each extremum is going to be displaced due to gaps
starting += gap * np.arange(len(proportions) - 1)
# how much the squares plus the gaps are extended
extension = starting[-1] + amplitude[-1] - starting[0]
    # normalize everything so it fits again in the original dimension
starting /= extension
amplitude /= extension
# bring everything to the original square
starting = (x if horizontal else y) + starting * (w if horizontal else h)
amplitude = amplitude * (w if horizontal else h)
# create each 4-tuple for each new block
results = [(s, y, a, h) if horizontal else (x, s, w, a)
for s, a in zip(starting, amplitude)]
return results
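# A minimal sketch (not part of the original module) of the (x, y, width,
# height) tuples produced by _split_rect; the inputs are illustrative:
def _example_split_rect():  # pragma: no cover
    """Split the unit square into two equal horizontal pieces, with no gap."""
    return _split_rect(0, 0, 1, 1, 0.5, horizontal=True, gap=0)
    # -> [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]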
def _reduce_dict(count_dict, partial_key):
"""
    Make a partial sum on a counter dict: given a partial key, sum the values
    of every entry whose key starts with it.
"""
L = len(partial_key)
count = sum(v for k, v in iteritems(count_dict) if k[:L] == partial_key)
return count
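# A minimal sketch (not part of the original module) of the partial sums
# computed by _reduce_dict; the counts are illustrative assumptions:
def _example_reduce_dict():  # pragma: no cover
    """Sum the entries whose key starts with ('a',), then all entries."""
    counts = OrderedDict([(('a', 'x'), 1), (('a', 'y'), 2), (('b', 'x'), 3)])
    return _reduce_dict(counts, ('a',)), _reduce_dict(counts, tuple())
    # -> (3, 6)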
def _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):
"""
    Given a dictionary where each entry is a rectangle, plus a list of keys
    and values (the count of elements in each category), split each rect
    whose key starts with the tuple key_subset accordingly. The other keys
    are returned without modification.
"""
result = OrderedDict()
L = len(key_subset)
for name, (x, y, w, h) in iteritems(rect_dict):
if key_subset == name[:L]:
            # split based on the values given
divisions = _split_rect(x, y, w, h, values, horizontal, gap)
for key, rect in zip(keys, divisions):
result[name + (key,)] = rect
else:
result[name] = (x, y, w, h)
return result
def _tuplify(obj):
"""convert an object in a tuple of strings (even if it is not iterable,
like a single integer number, but keep the string healthy)
"""
if np.iterable(obj) and not isinstance(obj, string_types):
res = tuple(str(o) for o in obj)
else:
res = (str(obj),)
return res
def _categories_level(keys):
"""use the Ordered dict to implement a simple ordered set
return each level of each category
[[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]
"""
res = []
for i in zip(*(keys)):
tuplefied = _tuplify(i)
res.append(list(OrderedDict([(j, None) for j in tuplefied])))
return res
def _hierarchical_split(count_dict, horizontal=True, gap=0.05):
"""
Split a square in a hierarchical way given a contingency table.
Hierarchically split the unit square in alternate directions
in proportion to the subdivision contained in the contingency table
    count_dict. This is the function that actually performs the tiling
    for the creation of the mosaic plot. If the gap array has been specified,
    it will insert a corresponding amount of space (proportional to the
    unit length), while retaining the proportionality of the tiles.
Parameters
----------
count_dict : dict
Dictionary containing the contingency table.
Each category should contain a non-negative number
        with a tuple as index. It expects all combinations of keys
        to be represented; missing combinations are automatically
        treated as 0
horizontal : bool
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it is a single number), it will be
        extended with exponentially decreasing gaps
Returns
----------
base_rect : dict
A dictionary containing the result of the split.
To each key is associated a 4-tuple of coordinates
that are required to create the corresponding rectangle:
0 - x position of the lower left corner
1 - y position of the lower left corner
2 - width of the rectangle
3 - height of the rectangle
"""
# this is the unit square that we are going to divide
base_rect = OrderedDict([(tuple(), (0, 0, 1, 1))])
# get the list of each possible value for each level
categories_levels = _categories_level(list(iterkeys(count_dict)))
L = len(categories_levels)
# recreate the gaps vector starting from an int
if not np.iterable(gap):
gap = [gap / 1.5 ** idx for idx in range(L)]
# extend if it's too short
if len(gap) < L:
last = gap[-1]
        gap = list(gap) + [last / 1.5 ** idx for idx in range(L)]
# trim if it's too long
gap = gap[:L]
    # put the count dictionary in order of the keys
# this will allow some code simplification
count_ordered = OrderedDict([(k, count_dict[k])
for k in list(product(*categories_levels))])
for cat_idx, cat_enum in enumerate(categories_levels):
# get the partial key up to the actual level
base_keys = list(product(*categories_levels[:cat_idx]))
for key in base_keys:
# for each partial and each value calculate how many
            # observations we have in the counting dictionary
part_count = [_reduce_dict(count_ordered, key + (partial,))
for partial in cat_enum]
            # reduce the gap for subsequent levels
new_gap = gap[cat_idx]
# split the given subkeys in the rectangle dictionary
base_rect = _key_splitting(base_rect, cat_enum, part_count, key,
horizontal, new_gap)
horizontal = not horizontal
return base_rect
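# A minimal sketch (not part of the original module) of the tiling produced by
# _hierarchical_split; the 2x2 contingency table is an illustrative assumption:
def _example_hierarchical_split():  # pragma: no cover
    """With equal counts and no gap, each key gets a quarter of the unit square."""
    counts = OrderedDict([(('a', 'x'), 1), (('a', 'y'), 1),
                          (('b', 'x'), 1), (('b', 'y'), 1)])
    return _hierarchical_split(counts, horizontal=True, gap=0)
    # -> {key: (x, y, width, height)} with every tile 0.5 wide and 0.5 tall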
def _single_hsv_to_rgb(hsv):
"""Transform a color from the hsv space to the rgb."""
from matplotlib.colors import hsv_to_rgb
return hsv_to_rgb(array(hsv).reshape(1, 1, 3)).reshape(3)
def _create_default_properties(data):
""""Create the default properties of the mosaic given the data
first it will varies the color hue (first category) then the color
saturation (second category) and then the color value
(third category). If a fourth category is found, it will put
decoration on the rectangle. Doesn't manage more than four
level of categories
"""
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
# first level, the hue
L = len(categories_levels[0])
# hue = np.linspace(1.0, 0.0, L+1)[:-1]
hue = np.linspace(0.0, 1.0, L + 2)[:-2]
# second level, the saturation
L = len(categories_levels[1]) if Nlevels > 1 else 1
saturation = np.linspace(0.5, 1.0, L + 1)[:-1]
# third level, the value
L = len(categories_levels[2]) if Nlevels > 2 else 1
value = np.linspace(0.5, 1.0, L + 1)[:-1]
# fourth level, the hatch
L = len(categories_levels[3]) if Nlevels > 3 else 1
hatch = ['', '/', '-', '|', '+'][:L + 1]
# convert in list and merge with the levels
hue = lzip(list(hue), categories_levels[0])
saturation = lzip(list(saturation),
categories_levels[1] if Nlevels > 1 else [''])
value = lzip(list(value),
categories_levels[2] if Nlevels > 2 else [''])
hatch = lzip(list(hatch),
categories_levels[3] if Nlevels > 3 else [''])
# create the properties dictionary
properties = {}
for h, s, v, t in product(hue, saturation, value, hatch):
hv, hn = h
sv, sn = s
vv, vn = v
tv, tn = t
level = (hn,) + ((sn,) if sn else tuple())
level = level + ((vn,) if vn else tuple())
level = level + ((tn,) if tn else tuple())
hsv = array([hv, sv, vv])
prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}
properties[level] = prop
return properties
def _normalize_data(data, index):
"""normalize the data to a dict with tuples of strings as keys
right now it works with:
0 - dictionary (or equivalent mappable)
1 - pandas.Series with simple or hierarchical indexes
2 - numpy.ndarrays
3 - everything that can be converted to a numpy array
4 - pandas.DataFrame (via the _normalize_dataframe function)
"""
# if data is a dataframe we need to take a completely new road
# before coming back here. Use the hasattr to avoid importing
# pandas explicitly
if hasattr(data, 'pivot') and hasattr(data, 'groupby'):
data = _normalize_dataframe(data, index)
index = None
# can it be used as a dictionary?
try:
items = list(iteritems(data))
except AttributeError:
# ok, I cannot use the data as a dictionary
# Try to convert it to a numpy array, or die trying
data = np.asarray(data)
temp = OrderedDict()
for idx in np.ndindex(data.shape):
name = tuple(i for i in idx)
temp[name] = data[idx]
data = temp
items = list(iteritems(data))
# make all the keys a tuple, even if simple numbers
data = OrderedDict([_tuplify(k), v] for k, v in items)
categories_levels = _categories_level(list(iterkeys(data)))
# fill the void in the counting dictionary
indexes = product(*categories_levels)
contingency = OrderedDict([(k, data.get(k, 0)) for k in indexes])
data = contingency
# reorder the keys order according to the one specified by the user
# or if the index is None convert it into a simple list
# right now it doesn't do any check, but can be modified in the future
index = lrange(len(categories_levels)) if index is None else index
contingency = OrderedDict()
for key, value in iteritems(data):
new_key = tuple(key[i] for i in index)
contingency[new_key] = value
data = contingency
return data
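# A minimal sketch (not part of the original module) of the key normalization
# performed by _normalize_data; the inputs are illustrative assumptions:
def _example_normalize_data():  # pragma: no cover
    """Dict keys become 1-tuples; ndarray indices become tuples of strings."""
    from_dict = _normalize_data(OrderedDict([('a', 10), ('b', 15)]), None)
    # -> OrderedDict([(('a',), 10), (('b',), 15)])
    from_array = _normalize_data(np.array([[1, 2], [3, 4]]), None)
    # -> OrderedDict([(('0', '0'), 1), (('0', '1'), 2),
    #                 (('1', '0'), 3), (('1', '1'), 4)])
    return from_dict, from_array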
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
def _statistical_coloring(data):
"""evaluate colors from the indipendence properties of the matrix
It will encounter problem if one category has all zeros
"""
data = _normalize_data(data, None)
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
total = 1.0 * sum(v for v in itervalues(data))
# count the proportion of observation
# for each level that has the given name
# at each level
levels_count = []
for level_idx in range(Nlevels):
proportion = {}
for level in categories_levels[level_idx]:
proportion[level] = 0.0
for key, value in iteritems(data):
if level == key[level_idx]:
proportion[level] += value
proportion[level] /= total
levels_count.append(proportion)
# for each key I obtain the expected value
    # and its standard deviation from a binomial distribution
    # under the hypothesis of independence
expected = {}
for key, value in iteritems(data):
base = 1.0
for i, k in enumerate(key):
base *= levels_count[i][k]
expected[key] = base * total, np.sqrt(total * base * (1.0 - base))
# now we have the standard deviation of distance from the
# expected value for each tile. We create the colors from this
sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in iteritems(expected))
props = {}
for key, dev in iteritems(sigmas):
red = 0.0 if dev < 0 else (dev / (1 + dev))
blue = 0.0 if dev > 0 else (dev / (-1 + dev))
green = (1.0 - red - blue) / 2.0
hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''
props[key] = {'color': [red, green, blue], 'hatch': hatch}
return props
def _get_position(x, w, h, W):
    """return the tile center coordinate weighted by the tile's share of the
    total area W (summing this over tiles gives an area-weighted mean)"""
    if W == 0:
        return x
    return (x + w / 2.0) * w * h / W
def _create_labels(rects, horizontal, ax, rotation):
"""find the position of the label for each value of each category
    right now it supports only up to four categories
ax: the axis on which the label should be applied
rotation: the rotation list for each side
"""
categories = _categories_level(list(iterkeys(rects)))
if len(categories) > 4:
msg = ("maximum of 4 level supported for axes labeling..and 4"
"is alreay a lot of level, are you sure you need them all?")
raise NotImplementedError(msg)
labels = {}
    #keep it fixed as it will be used a lot of times
items = list(iteritems(rects))
vertical = not horizontal
#get the axis ticks and labels locator to put the correct values!
ax2 = ax.twinx()
ax3 = ax.twiny()
#this is the order of execution for horizontal disposition
ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]
ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,
ax3.set_xticklabels, ax2.set_yticklabels]
#for the vertical one, rotate it by one
if vertical:
ticks_pos = ticks_pos[1:] + ticks_pos[:1]
ticks_lab = ticks_lab[1:] + ticks_lab[:1]
#clean them
for pos, lab in zip(ticks_pos, ticks_lab):
pos([])
lab([])
#for each level, for each value in the level, take the mean of all
#the sublevel that correspond to that partial key
for level_idx, level in enumerate(categories):
#this dictionary keep the labels only for this level
level_ticks = dict()
for value in level:
#to which level it should refer to get the preceding
#values of labels? it's rather a tricky question...
            #this is dependent on the side. It's a very crude approach,
            #but I couldn't think of a more general way...
if horizontal:
if level_idx == 3:
index_select = [-1, -1, -1]
else:
index_select = [+0, -1, -1]
else:
if level_idx == 3:
index_select = [+0, -1, +0]
else:
index_select = [-1, -1, -1]
#now I create the base key name and append the current value
#It will search on all the rects to find the corresponding one
#and use them to evaluate the mean position
basekey = tuple(categories[i][index_select[i]]
for i in range(level_idx))
basekey = basekey + (value,)
subset = dict((k, v) for k, v in items
if basekey == k[:level_idx + 1])
#now I extract the center of all the tiles and make a weighted
#mean of all these center on the area of the tile
#this should give me the (more or less) correct position
#of the center of the category
vals = list(itervalues(subset))
W = sum(w * h for (x, y, w, h) in vals)
x_lab = sum(_get_position(x, w, h, W) for (x, y, w, h) in vals)
y_lab = sum(_get_position(y, h, w, W) for (x, y, w, h) in vals)
            #now, based on the ordering, select which position to keep
            #needs to be written in a more general form, or are 4 levels enough?
            #it should also give the horizontal and vertical alignment
side = (level_idx + vertical) % 4
level_ticks[value] = y_lab if side % 2 else x_lab
#now we add the labels of this level to the correct axis
ticks_pos[level_idx](list(itervalues(level_ticks)))
ticks_lab[level_idx](list(iterkeys(level_ticks)),
rotation=rotation[level_idx])
return labels
def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,
properties=lambda key: None, labelizer=None,
title='', statistic=False, axes_label=True,
label_rotation=0.0):
"""Create a mosaic plot from a contingency table.
    It allows one to visualize multivariate categorical data in a rigorous
and informative way.
Parameters
----------
data : dict, pandas.Series, np.ndarray, pandas.DataFrame
The contingency table that contains the data.
Each category should contain a non-negative number
        with a tuple as index. It expects all combinations of keys
        to be represented; missing combinations are automatically
        treated as 0. The order of the keys will be the same as
        the order of insertion. If a dict or a Series (or any other
        dict-like object) is used, it will take the keys as labels.
        If a np.ndarray is provided, it will generate simple
        numerical labels.
index: list, optional
Gives the preferred order for the category ordering. If not specified
will default to the given order. It doesn't support named indexes
for hierarchical Series. If a DataFrame is provided, it expects
a list with the name of the columns.
ax : matplotlib.Axes, optional
        The axes on which to display the mosaic. If not given, a new
        figure will be created.
horizontal : bool, optional (default True)
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it is a single number), it will be
        extended with exponentially decreasing gaps
labelizer : function (key) -> string, optional
        A function that generates the text to display at the center of
        each tile based on the key of that tile
properties : function (key) -> dict, optional
        A function that, for each tile in the mosaic, takes the key
        of the tile and returns the dictionary of properties
        of the generated Rectangle, like color, hatch or similar.
        A default property set will be provided for the keys whose
        color has not been defined, and will use color variation to help
        visually separate the various categories. It should return None
to indicate that it should use the default property for the tile.
A dictionary of the properties for each key can be passed,
and it will be internally converted to the correct function
statistic: bool, optional (default False)
        if True, a crude statistical model is used to give colors to the plot.
        If a tile has a count that is more than 2 standard deviations
        from the expected value under the independence hypothesis, it will
        go from green to red (for positive deviations, blue otherwise) and
        will acquire hatching when it crosses the 3 sigma level.
title: string, optional
The title of the axis
axes_label: boolean, optional
Show the name of each value of each category
on the axis (default) or hide them.
label_rotation: float or list of float
the rotation of the axis label (if present). If a list is given
each axis can have a different rotation
Returns
----------
fig : matplotlib.Figure
        The generated figure
rects : dict
A dictionary that has the same keys of the original
dataset, that holds a reference to the coordinates of the
tile and the Rectangle that represent it
See Also
----------
A Brief History of the Mosaic Display
Michael Friendly, York University, Psychology Department
Journal of Computational and Graphical Statistics, 2001
Mosaic Displays for Loglinear Models.
Michael Friendly, York University, Psychology Department
Proceedings of the Statistical Graphics Section, 1992, 61-68.
    Mosaic displays for multi-way contingency tables.
    Michael Friendly, York University, Psychology Department
    Journal of the American Statistical Association
    March 1994, Vol. 89, No. 425, Theory and Methods
Examples
----------
The most simple use case is to take a dictionary and plot the result
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> mosaic(data, title='basic dictionary')
>>> pylab.show()
A more useful example is given by a dictionary with multiple indices.
    In this case we use a wider gap to obtain a better visual separation of
    the resulting plot
>>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
>>> mosaic(data, gap=0.05, title='complete dictionary')
>>> pylab.show()
The same data can be given as a simple or hierarchical indexed Series
>>> rand = np.random.random
>>> from itertools import product
>>>
>>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
>>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
>>> data = pd.Series(rand(8), index=index)
>>> mosaic(data, title='hierarchical index series')
>>> pylab.show()
    The third accepted data structure is the np array, for which a
very simple index will be created.
>>> rand = np.random.random
>>> data = 1+rand((2,2))
>>> mosaic(data, title='random non-labeled array')
>>> pylab.show()
If you need to modify the labeling and the coloring you can give
    a function to create the labels and one with the graphical properties
starting from the key tuple
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}
>>> labelizer = lambda k: {('a',): 'first', ('b',): 'second',
('c',): 'third'}[k]
>>> mosaic(data, title='colored dictionary',
properties=props, labelizer=labelizer)
>>> pylab.show()
Using a DataFrame as source, specifying the name of the columns of interest
>>> gender = ['male', 'male', 'male', 'female', 'female', 'female']
>>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
>>> data = pandas.DataFrame({'gender': gender, 'pet': pet})
>>> mosaic(data, ['pet', 'gender'])
>>> pylab.show()
"""
if isinstance(data, DataFrame) and index is None:
raise ValueError("You must pass an index if data is a DataFrame."
" See examples.")
from pylab import Rectangle
fig, ax = utils.create_mpl_ax(ax)
# normalize the data to a dict with tuple of strings as keys
data = _normalize_data(data, index)
# split the graph into different areas
rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)
# if there is no specified way to create the labels
# create a default one
if labelizer is None:
labelizer = lambda k: "\n".join(k)
if statistic:
default_props = _statistical_coloring(data)
else:
default_props = _create_default_properties(data)
if isinstance(properties, dict):
color_dict = properties
properties = lambda key: color_dict.get(key, None)
for k, v in iteritems(rects):
# create each rectangle and put a label on it
x, y, w, h = v
conf = properties(k)
props = conf if conf else default_props[k]
text = labelizer(k)
Rect = Rectangle((x, y), w, h, label=text, **props)
ax.add_patch(Rect)
ax.text(x + w / 2, y + h / 2, text, ha='center',
va='center', size='smaller')
# creating the labels on the axis
# or clearing it
if axes_label:
if np.iterable(label_rotation):
rotation = label_rotation
else:
rotation = [label_rotation] * 4
labels = _create_labels(rects, horizontal, ax, rotation)
else:
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_title(title)
return fig, rects
|
bsd-3-clause
|
schets/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
5245
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places; these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
bsd-3-clause
|
wronk/mne-python
|
mne/viz/montage.py
|
11
|
1801
|
"""Functions to plot EEG sensor montages or digitizer montages
"""
import numpy as np
from .utils import plt_show
def plot_montage(montage, scale_factor=1.5, show_names=False, show=True):
"""Plot a montage
Parameters
----------
montage : instance of Montage
The montage to visualize.
scale_factor : float
Determines the size of the points. Defaults to 1.5.
show_names : bool
Whether to show the channel names. Defaults to False.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure object.
"""
from ..channels.montage import Montage, DigMontage
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if isinstance(montage, Montage):
pos = montage.pos
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
if show_names:
ch_names = montage.ch_names
for ch_name, x, y, z in zip(ch_names, pos[:, 0],
pos[:, 1], pos[:, 2]):
ax.text(x, y, z, ch_name)
elif isinstance(montage, DigMontage):
pos = np.vstack((montage.hsp, montage.elp))
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
if show_names:
if montage.point_names:
hpi_names = montage.point_names
for hpi_name, x, y, z in zip(hpi_names, montage.elp[:, 0],
montage.elp[:, 1],
montage.elp[:, 2]):
ax.text(x, y, z, hpi_name)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt_show(show)
return fig
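# A minimal hedged usage sketch (assumes the montage readers shipped with this
# MNE version; 'standard_1020' is one of the built-in montage names):
#
#     from mne.channels import read_montage
#     montage = read_montage('standard_1020')
#     fig = plot_montage(montage, show_names=True)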
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.17/_downloads/073cd329c76032781ae018af6b775d9b/plot_decoding_unsupervised_spatial_filter.py
|
29
|
2496
|
"""
==================================================================
Analysis of evoked response using ICA and PCA reduction techniques
==================================================================
This example computes PCA and ICA of evoked or epochs data. Then the
PCA / ICA components, a.k.a. spatial filters, are used to transform
the channel data to new sources / virtual channels. The output is
visualized on the average of all the epochs.
"""
# Authors: Jean-Remi King <[email protected]>
# Asish Panda <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.decoding import UnsupervisedSpatialFilter
from sklearn.decomposition import PCA, FastICA
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
X = epochs.get_data()
##############################################################################
# Transform data with PCA computed on the average, i.e. the evoked response
pca = UnsupervisedSpatialFilter(PCA(30), average=False)
pca_data = pca.fit_transform(X)
ev = mne.EvokedArray(np.mean(pca_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev.plot(show=False, window_title="PCA", time_unit='s')
##############################################################################
# Transform data with ICA computed on the raw epochs (no averaging)
ica = UnsupervisedSpatialFilter(FastICA(30), average=False)
ica_data = ica.fit_transform(X)
ev1 = mne.EvokedArray(np.mean(ica_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev1.plot(show=False, window_title='ICA', time_unit='s')
plt.show()
|
bsd-3-clause
|
zorroblue/scikit-learn
|
sklearn/datasets/__init__.py
|
61
|
3734
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
|
bsd-3-clause
|
EnboYang/eebybay
|
eeofbatse.py
|
1
|
4697
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Project:BB Analysis of BATSE GRB
# <markdowncell>
# This script is used to do Bayesian Block analysis with the ascii file of BATSE GRB
#
# Author:Enbo Yang([email protected])
#
# License: GPLv2
# <headingcell level=2>
# Module preparing
# <codecell>
import numpy as np
import matplotlib.pyplot as plt
from astroML.plotting import hist as bbhist
# <headingcell level=2>
# Get the Trigger Num
# <codecell>
trig=raw_input('Please input the trigger:')
# <headingcell level=2>
# Create file to save the channel data
# <codecell>
chnum=1
while chnum<=4:
f=open("%s.time.CH%s"%(trig,chnum),'w')
f.close()
chnum+=1
# <headingcell level=2>
# Split data of each channel
# <codecell>
#This part is used to split channel data of the ascii file of a burst
'''
Because of a limitation in numpy.genfromtxt (it cannot read a file whose rows have different lengths into an array), numpy.fromstring is used instead.
timedata: First array in the ascii file, stands for the photon arrival times (may need to be edited here!)
chdata: Second array in the ascii file, stands for the channel information.
totalno: The number of photons that arrived at the detector
'''
timedata=np.fromstring(''.join(open('%s.time'%trig,'r').read().splitlines()),sep=' ') # CAUTIONS: Need to give the time file manually
chdata=np.fromstring(''.join(open('%s.channel'%trig,'r').read().splitlines()),sep=' ')
(totalno,)=np.shape(timedata)
n=0 # n is just a temporary variable here; may need to be changed if it conflicts
while n<totalno:
f=open("%s.time.CH%i"%(trig,chdata[n]),'a')
f.write("%f"%timedata[n]+'\n')
f.close()
n+=1
# <headingcell level=2>
# Do Bayesian Block analysis
# <markdowncell>
# During the analysis, we add Knuth bins, Scott bins and Freedman bins as a comparison.
# <headingcell level=3>
# total data
# <codecell>
#We use standard histogram as background, then Knuth bins & Bayesian block
fig,axes = plt.subplots(2,1,figsize=(12,6))
axes[0].hist(timedata/1000,bins=64,color='blue',histtype='step',label='64ms')
axes[0].set_title('bins=64')
axes[0].set_xlabel('Time/second')
axes[0].set_ylabel('Count')
axes[0].legend(loc='best')
axes[1].hist(timedata/1000,bins=1000,color='blue',histtype='step',normed='True',label='bins=1s')
bbhist(timedata/1000,bins='blocks',color='red',histtype='step',normed='True',label='bayesian')
axes[1].legend(loc='best')
axes[1].set_title('Bayesian block')
axes[1].set_xlabel('Time/second')
axes[1].set_ylabel('Count Rate($s^{-1}$)')
#axes[1].set_ylim([0,0.001])
fig.tight_layout()
fig.savefig('%s.png'%trig,dpi=200)
# <headingcell level=2>
# Channel data
# <headingcell level=3>
# Read Channel data from file
# <codecell>
ch1time=np.genfromtxt('%s.time.CH1'%trig)
ch2time=np.genfromtxt('%s.time.CH2'%trig)
ch3time=np.genfromtxt('%s.time.CH3'%trig)
ch4time=np.genfromtxt('%s.time.CH4'%trig)
(count1,)=ch1time.shape
(count2,)=ch2time.shape
(count3,)=ch3time.shape
(count4,)=ch4time.shape
ch1time=np.reshape(ch1time,[count1,1])
ch2time=np.reshape(ch2time,[count2,1])
ch3time=np.reshape(ch3time,[count3,1])
ch4time=np.reshape(ch4time,[count4,1])
# <headingcell level=3>
# Plot data in one Picture
# <codecell>
fig=plt.figure(figsize=(15,11))
axes=plt.subplot(2,2,1)
axes.hist(ch1time/1000,bins=128,color='blue',normed='True',histtype='step',label='bins=1s')
bbhist(ch1time/1000,bins='blocks',color='red',normed='True',histtype='step',label='bayesian')
axes.legend(loc='best')
axes.set_title('Channel 1')
axes.set_xlabel('Time/second')
axes.set_ylabel('Count Rate($s^{-1}$)')
axes=plt.subplot(2,2,2)
axes.hist(ch2time/1000,bins=64,color='blue',normed='True',histtype='step',label='bins=1s')
bbhist(ch2time/1000,bins='blocks',color='red',normed='True',histtype='step',label='bayesian')
axes.legend(loc='best')
axes.set_title('Channel 2')
axes.set_xlabel('Time/second')
axes.set_ylabel('Count Rate($s^{-1}$)')
axes=plt.subplot(2,2,3)
axes.hist(ch3time/1000,bins=64,color='blue',normed='True',histtype='step',label='bins=1s')
bbhist(ch3time/1000,bins='blocks',color='red',normed='True',histtype='step',label='bayesian')
axes.legend(loc='best')
axes.set_title('Channel 3')
axes.set_xlabel('Time/second')
axes.set_ylabel('Count Rate($s^{-1}$)')
axes=plt.subplot(2,2,4)
axes.hist(ch4time/1000,bins=64,color='blue',normed='True',histtype='step',label='bins=1s')
bbhist(ch4time/1000,bins='blocks',color='red',normed='True',histtype='step',label='bayesian')
axes.legend(loc='best')
axes.set_title('Channel 4')
axes.set_xlabel('Time/second')
axes.set_ylabel('Count Rate($s^{-1}$)')
fig.tight_layout()
fig.savefig('%sCH.png'%trig,dpi=200)
# <codecell>
|
gpl-2.0
|
mjirik/quantan
|
setup.py
|
2
|
3556
|
# The following commands are used to upload to PyPI
# bumpversion patch
# python setup.py register sdist upload
from setuptools import setup, find_packages
# Always prefer setuptools over distutils
from os import path
__VERSION__ = '0.0.27'
here = path.abspath(path.dirname(__file__))
setup(
name='quantan',
description='Quantitative histological analyser',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version=__VERSION__,
url='https://github.com/mjirik/quanta',
author='Miroslav Jirik and Pavel Volkovinsky',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='dicom 3D read write',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['dist', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['numpy', 'scipy', "pyyaml", 'matplotlib', 'skelet3d', 'imtools', 'sed3', "pysegbase", "io3d"],
# 'SimpleITK'], # Removed because of errors when pip is installing
dependency_links=[],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'quantan': ['icon/icon.png'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
mit
|
icdishb/scikit-learn
|
examples/bicluster/bicluster_newsgroups.py
|
42
|
7098
|
"""
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals import six
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
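# A hedged illustration of the tokenizer above (the input string is made up):
#     number_aware_tokenizer("sold 42 units in 2014")
#     -> ['sold', '#NUMBER', 'units', 'in', '#NUMBER']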
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
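# An informal reading of the score above (hedged, not from the original text):
# ncut(i) ~ cut(rows_i, cols_i) / weight(rows_i, cols_i), i.e. the X-mass that
# links the bicluster to its complement divided by the mass inside it, so
# smaller values indicate tighter, better separated biclusters.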
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(six.iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
|
bsd-3-clause
|
richlewis42/scikit-chem
|
skchem/test/test_core/test_bond.py
|
1
|
3412
|
#! /usr/bin/env python
#
# Copyright (C) 2015-2016 Rich Lewis <[email protected]>
# License: 3-clause BSD
import pytest
import numpy as np
import pandas as pd
from ...core import bond, Mol
from . import example_mol #provides 'm' fixture
@pytest.fixture(name='b')
def example_bond(m):
return m.bonds[0]
@pytest.fixture(name='bwp')
def example_bond_with_props(b):
b.props['test'] = 'value'
return b
def test_len(m):
assert len(m.bonds) == 6
def test_out_of_range(m):
with pytest.raises(IndexError):
m.bonds[100]
def test_reverse_index(m):
assert m.bonds[-1].order == 1
def test_slice(m):
assert len(m.bonds[[1, 4]]) == 2
def test_repr(b):
assert repr(b) == '<Bond type="O-C" at {}>'.format(hex(id(b)))
def test_owner(m):
# rdkit gives a copy of the object, so cant test for identity
assert m.bonds[0].owner.to_smiles() == m.to_smiles()
def test_index(b):
assert b.index == 0
def test_to_dict(b):
assert b.to_dict() == {'b': 0, 'e':1, 'o': 1}
def test_bonds_index(m):
assert m.bonds.index.equals(pd.RangeIndex(6, name='bond_idx'))
def test_all_params_on_view():
params = list(bond.Bond.__dict__.keys())
for param in ('__doc__', '__repr__', '__str__', '__module__', 'atoms',
'props', 'owner', 'draw', 'to_dict'):
params.remove(param)
for param in params:
assert hasattr(bond.BondView, param)
def test_atoms(b):
assert len(b.atoms) == 2
def test_atom_idxs(b):
assert b.atom_idxs == (0, 1)
test_data = [
('order', [1, 2, 1, 1, 1, 1]),
('stereo_symbol', ['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE']),
('is_in_ring', [False, False, False, False, False, False]),
('is_conjugated', [True, True, False, False, False, False]),
('is_aromatic', [False, False, False, False, False, False])
]
params = pytest.mark.parametrize('param, expected', test_data)
@pytest.fixture(name='m_a')
def example_aromatic_mol():
return Mol.from_smiles('c1ccNc1')
arom_test_data = [
('order', [1.5, 1.5, 1.5, 1.5, 1.5]),
('stereo_symbol', ['NONE', 'NONE', 'NONE', 'NONE', 'NONE']),
('is_in_ring', [True, True, True, True, True]),
('is_conjugated', [True, True, True, True, True]),
('is_aromatic', [True, True, True, True, True])
]
arom_params = pytest.mark.parametrize('param, expected', arom_test_data)
@params
def test_params_on_bond_view(m, param, expected):
assert np.array_equal(getattr(m.bonds, param), expected)
@arom_params
def test_arom_params(m_a, param, expected):
assert np.array_equal(getattr(m_a.bonds, param), expected)
@params
def test_params_on_bonds(m, param, expected):
res = np.array([getattr(b, param) for b in m.bonds])
assert np.array_equal(res, expected)
def test_props_keys_empty(b):
assert len(b.props.keys()) == 0
def test_props_len_empty(b):
assert len(b.props) == 0
def test_props_keys_full(bwp):
assert len(bwp.props.keys()) == 1
assert bwp.props.keys()[0] == 'test'
def test_props_len_full(bwp):
assert len(bwp.props) == 1
def test_edge_adj(m):
assert np.array_equal(m.bonds.adjacency_matrix(), np.array([
[0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 0],
[1, 1, 0, 1, 1, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 1, 0]]))
def test_atom_idx_view(m):
assert m.bonds.atom_idxs.shape == (len(m.bonds), 2)
|
bsd-3-clause
|
JsNoNo/scikit-learn
|
sklearn/tests/test_learning_curve.py
|
225
|
10791
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
|
bsd-3-clause
|
jhamman/xarray
|
xarray/core/groupby.py
|
1
|
32122
|
import datetime
import functools
import warnings
import numpy as np
import pandas as pd
from . import dtypes, duck_array_ops, nputils, ops
from .arithmetic import SupportsArithmetic
from .common import ImplementsArrayReduce, ImplementsDatasetReduce
from .concat import concat
from .formatting import format_array_flat
from .options import _get_keep_attrs
from .pycompat import integer_types
from .utils import (
either_dict_or_kwargs,
hashable,
is_scalar,
maybe_wrap_array,
peek_at,
safe_cast_to_index,
)
from .variable import IndexVariable, Variable, as_variable
def check_reduce_dims(reduce_dims, dimensions):
if reduce_dims is not ...:
if is_scalar(reduce_dims):
reduce_dims = [reduce_dims]
if any([dim not in dimensions for dim in reduce_dims]):
raise ValueError(
"cannot reduce over dimensions %r. expected either '...' to reduce over all dimensions or one or more of %r."
% (reduce_dims, dimensions)
)
def unique_value_groups(ar, sort=True):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
sort : boolean, optional
Whether or not to sort unique values.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
inverse, values = pd.factorize(ar, sort=sort)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
if g >= 0:
# pandas uses -1 to mark NaN, but doesn't include them in values
groups[g].append(n)
return values, groups
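# A hedged illustration of the helper above (values chosen for this sketch only):
#     unique_value_groups(np.array(['b', 'a', 'b']))
#     -> (array(['a', 'b'], ...), [[1], [0, 2]])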
def _dummy_copy(xarray_obj):
from .dataset import Dataset
from .dataarray import DataArray
if isinstance(xarray_obj, Dataset):
res = Dataset(
{
k: dtypes.get_fill_value(v.dtype)
for k, v in xarray_obj.data_vars.items()
},
{
k: dtypes.get_fill_value(v.dtype)
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims
},
xarray_obj.attrs,
)
elif isinstance(xarray_obj, DataArray):
res = DataArray(
dtypes.get_fill_value(xarray_obj.dtype),
{
k: dtypes.get_fill_value(v.dtype)
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims
},
dims=[],
name=xarray_obj.name,
attrs=xarray_obj.attrs,
)
else: # pragma: no cover
raise AssertionError
return res
def _is_one_or_none(obj):
return obj == 1 or obj is None
def _consolidate_slices(slices):
"""Consolidate adjacent slices in a list of slices.
"""
result = []
last_slice = slice(None)
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError("list element is not a slice: %r" % slice_)
if (
result
and last_slice.stop == slice_.start
and _is_one_or_none(last_slice.step)
and _is_one_or_none(slice_.step)
):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
result.append(slice_)
last_slice = slice_
return result
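# Hedged example of the consolidation above (slices chosen for this sketch):
#     _consolidate_slices([slice(0, 3), slice(3, 6), slice(8, 10)])
#     -> [slice(0, 6, None), slice(8, 10)]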
def _inverse_permutation_indices(positions):
"""Like inverse_permutation, but also handles slices.
Parameters
----------
positions : list of np.ndarray or slice objects.
If slice objects, all are assumed to be slices.
Returns
-------
np.ndarray of indices or None, if no permutation is necessary.
"""
if not positions:
return None
if isinstance(positions[0], slice):
positions = _consolidate_slices(positions)
if positions == slice(None):
return None
positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]
indices = nputils.inverse_permutation(np.concatenate(positions))
return indices
class _DummyGroup:
"""Class for keeping track of grouped dimensions without coordinates.
Should not be user visible.
"""
__slots__ = ("name", "coords", "size")
def __init__(self, obj, name, coords):
self.name = name
self.coords = coords
self.size = obj.sizes[name]
@property
def dims(self):
return (self.name,)
@property
def ndim(self):
return 1
@property
def values(self):
return range(self.size)
@property
def shape(self):
return (self.size,)
def __getitem__(self, key):
if isinstance(key, tuple):
key = key[0]
return self.values[key]
def _ensure_1d(group, obj):
if group.ndim != 1:
# try to stack the dims of the group into a single dim
orig_dims = group.dims
stacked_dim = "stacked_" + "_".join(orig_dims)
# these dimensions get created by the stack operation
inserted_dims = [dim for dim in group.dims if dim not in group.coords]
# the copy is necessary here, otherwise read only array raises error
# in pandas: https://github.com/pydata/pandas/issues/12813
group = group.stack(**{stacked_dim: orig_dims}).copy()
obj = obj.stack(**{stacked_dim: orig_dims})
else:
stacked_dim = None
inserted_dims = []
return group, obj, stacked_dim, inserted_dims
def _unique_and_monotonic(group):
if isinstance(group, _DummyGroup):
return True
else:
index = safe_cast_to_index(group)
return index.is_unique and index.is_monotonic
def _apply_loffset(grouper, result):
"""
(copied from pandas)
if loffset is set, offset the result index
This is NOT an idempotent routine, it will be applied
exactly once to the result.
Parameters
----------
result : Series or DataFrame
the result of resample
"""
needs_offset = (
isinstance(grouper.loffset, (pd.DateOffset, datetime.timedelta))
and isinstance(result.index, pd.DatetimeIndex)
and len(result.index) > 0
)
if needs_offset:
result.index = result.index + grouper.loffset
grouper.loffset = None
class GroupBy(SupportsArithmetic):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
Dataset.groupby
DataArray.groupby
"""
__slots__ = (
"_full_index",
"_inserted_dims",
"_group",
"_group_dim",
"_group_indices",
"_groups",
"_obj",
"_restore_coord_dims",
"_stacked_dim",
"_unique_coord",
"_dims",
)
def __init__(
self,
obj,
group,
squeeze=False,
grouper=None,
bins=None,
restore_coord_dims=None,
cut_kwargs={},
):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group : DataArray
Array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
grouper : pd.Grouper, optional
Used for grouping values along the `group` array.
bins : array-like, optional
If `bins` is specified, the groups will be discretized into the
specified bins by `pandas.cut`.
restore_coord_dims : bool, optional
If True, also restore the dimension order of multi-dimensional
coordinates.
cut_kwargs : dict, optional
Extra keyword arguments to pass to `pandas.cut`
"""
from .dataarray import DataArray
if grouper is not None and bins is not None:
raise TypeError("can't specify both `grouper` and `bins`")
if not isinstance(group, (DataArray, IndexVariable)):
if not hashable(group):
raise TypeError(
"`group` must be an xarray.DataArray or the "
"name of an xarray variable or dimension"
)
group = obj[group]
if len(group) == 0:
raise ValueError(f"{group.name} must not be empty")
if group.name not in obj.coords and group.name in obj.dims:
# DummyGroups should not appear on groupby results
group = _DummyGroup(obj, group.name, group.coords)
if getattr(group, "name", None) is None:
raise ValueError("`group` must have a name")
group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)
group_dim, = group.dims
expected_size = obj.sizes[group_dim]
if group.size != expected_size:
raise ValueError(
"the group variable's length does not "
"match the length of this variable along its "
"dimension"
)
full_index = None
if bins is not None:
if duck_array_ops.isnull(bins).all():
raise ValueError("All bin edges are NaN.")
binned = pd.cut(group.values, bins, **cut_kwargs)
new_dim_name = group.name + "_bins"
group = DataArray(binned, group.coords, name=new_dim_name)
full_index = binned.categories
if grouper is not None:
index = safe_cast_to_index(group)
if not index.is_monotonic:
# TODO: sort instead of raising an error
raise ValueError("index must be monotonic for resampling")
full_index, first_items = self._get_index_and_items(index, grouper)
sbins = first_items.values.astype(np.int64)
group_indices = [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])] + [
slice(sbins[-1], None)
]
unique_coord = IndexVariable(group.name, first_items.index)
elif group.dims == (group.name,) and _unique_and_monotonic(group):
# no need to factorize
group_indices = np.arange(group.size)
if not squeeze:
# use slices to do views instead of fancy indexing
# equivalent to: group_indices = group_indices.reshape(-1, 1)
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group
else:
# look through group to find the unique values
unique_values, group_indices = unique_value_groups(
safe_cast_to_index(group), sort=(bins is None)
)
unique_coord = IndexVariable(group.name, unique_values)
if len(group_indices) == 0:
if bins is not None:
raise ValueError(
"None of the data falls within bins with edges %r" % bins
)
else:
raise ValueError(
"Failed to group data. Are you grouping by a variable that is all NaN?"
)
if (
isinstance(obj, DataArray)
and restore_coord_dims is None
and any(obj[c].ndim > 1 for c in obj.coords)
):
warnings.warn(
"This DataArray contains multi-dimensional "
"coordinates. In the future, the dimension order "
"of these coordinates will be restored as well "
"unless you specify restore_coord_dims=False.",
FutureWarning,
stacklevel=2,
)
restore_coord_dims = False
# specification for the groupby operation
self._obj = obj
self._group = group
self._group_dim = group_dim
self._group_indices = group_indices
self._unique_coord = unique_coord
self._stacked_dim = stacked_dim
self._inserted_dims = inserted_dims
self._full_index = full_index
self._restore_coord_dims = restore_coord_dims
# cached attributes
self._groups = None
self._dims = None
@property
def dims(self):
if self._dims is None:
self._dims = self._obj.isel(
**{self._group_dim: self._group_indices[0]}
).dims
return self._dims
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self._unique_coord.values, self._group_indices))
return self._groups
def __len__(self):
return self._unique_coord.size
def __iter__(self):
return zip(self._unique_coord.values, self._iter_grouped())
def __repr__(self):
return "{}, grouped over {!r} \n{!r} groups with labels {}.".format(
self.__class__.__name__,
self._unique_coord.name,
self._unique_coord.size,
", ".join(format_array_flat(self._unique_coord, 30).split()),
)
def _get_index_and_items(self, index, grouper):
from .resample_cftime import CFTimeGrouper
s = pd.Series(np.arange(index.size), index)
if isinstance(grouper, CFTimeGrouper):
first_items = grouper.first_items(index)
else:
first_items = s.groupby(grouper).first()
_apply_loffset(grouper, first_items)
full_index = first_items.index
if first_items.isnull().any():
first_items = first_items.dropna()
return full_index, first_items
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self._group_indices:
yield self._obj.isel(**{self._group_dim: indices})
def _infer_concat_args(self, applied_example):
if self._group_dim in applied_example.dims:
coord = self._group
positions = self._group_indices
else:
coord = self._unique_coord
positions = None
dim, = coord.dims
if isinstance(coord, _DummyGroup):
coord = None
return coord, dim, positions
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
g = f if not reflexive else lambda x, y: f(y, x)
applied = self._yield_binary_applied(g, other)
combined = self._combine(applied)
return combined
return func
def _yield_binary_applied(self, func, other):
dummy = None
for group_value, obj in self:
try:
other_sel = other.sel(**{self._group.name: group_value})
except AttributeError:
raise TypeError(
"GroupBy objects only support binary ops "
"when the other argument is a Dataset or "
"DataArray"
)
except (KeyError, ValueError):
if self._group.name not in other.dims:
raise ValueError(
"incompatible dimensions for a grouped "
"binary operation: the group variable %r "
"is not a dimension on the other argument" % self._group.name
)
if dummy is None:
dummy = _dummy_copy(other)
other_sel = dummy
result = func(obj, other_sel)
yield result
def _maybe_restore_empty_groups(self, combined):
"""Our index contained empty groups (e.g., from a resampling). If we
reduced on that dimension, we want to restore the full index.
"""
if self._full_index is not None and self._group.name in combined.dims:
indexers = {self._group.name: self._full_index}
combined = combined.reindex(**indexers)
return combined
def _maybe_unstack(self, obj):
"""This gets called if we are applying on an array with a
multidimensional group."""
if self._stacked_dim is not None and self._stacked_dim in obj.dims:
obj = obj.unstack(self._stacked_dim)
for dim in self._inserted_dims:
if dim in obj.coords:
del obj.coords[dim]
return obj
def fillna(self, value):
"""Fill missing values in this object by group.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : valid type for the grouped object's fillna method
Used to fill all matching missing values by group.
Returns
-------
same type as the grouped object
See also
--------
Dataset.fillna
DataArray.fillna
"""
out = ops.fillna(self, value)
return out
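# Hedged illustration (the names are assumptions for this sketch): fill missing
# monthly values from a climatology indexed by 'month', e.g.
#     ds.groupby('time.month').fillna(climatology)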
def where(self, cond, other=dtypes.NA):
"""Return elements from `self` or `other` depending on `cond`.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
Locations at which to preserve this object's values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, inserts missing values.
Returns
-------
same type as the grouped object
See also
--------
Dataset.where
"""
return ops.where_method(self, cond, other)
def _first_or_last(self, op, skipna, keep_attrs):
if isinstance(self._group_indices[0], integer_types):
# NB. this is currently only used for reductions along an existing
# dimension
return self._obj
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
return self.reduce(
op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs, allow_lazy=True
)
def first(self, skipna=None, keep_attrs=None):
"""Return the first element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)
def last(self, skipna=None, keep_attrs=None):
"""Return the last element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)
def assign_coords(self, coords=None, **coords_kwargs):
"""Assign coordinates by group.
See also
--------
Dataset.assign_coords
Dataset.swap_dims
"""
coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, "assign_coords")
return self.apply(lambda ds: ds.assign_coords(**coords_kwargs))
def _maybe_reorder(xarray_obj, dim, positions):
order = _inverse_permutation_indices(positions)
if order is None:
return xarray_obj
else:
return xarray_obj[{dim: order}]
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self._obj.variable
for indices in self._group_indices:
yield var[{self._group_dim: indices}]
def _concat_shortcut(self, applied, dim, positions=None):
# nb. don't worry too much about maintaining this method -- it does
# speed things up, but it's not very interpretable and there are much
# faster alternatives (e.g., doing the grouped aggregation in a
# compiled language)
stacked = Variable.concat(applied, dim, shortcut=True)
reordered = _maybe_reorder(stacked, dim, positions)
result = self._obj._replace_maybe_drop_dims(reordered)
return result
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self._group.name:
dimension, = self._group.dims
if dimension in self._obj.dims:
axis = self._obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dims, key=lookup_order)
return stacked.transpose(*new_order, transpose_coords=self._restore_coord_dims)
def apply(self, func, shortcut=False, args=(), **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by the name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped)
return self._combine(applied, shortcut=shortcut)
def _combine(self, applied, restore_coord_dims=False, shortcut=False):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
if shortcut:
combined = self._concat_shortcut(applied, dim, positions)
else:
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
if isinstance(combined, type(self._obj)):
# only restore dimension order for arrays
combined = self._restore_dim_order(combined)
if coord is not None:
if shortcut:
combined._coords[coord.name] = as_variable(coord)
else:
combined.coords[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def quantile(self, q, dim=None, interpolation="linear", keep_attrs=None):
"""Compute the qth quantile over each array in the groups and
concatenate them together into a new array.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : `...`, str or sequence of str, optional
Dimension(s) over which to apply quantile.
Defaults to the grouped dimension.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
            is a scalar. If multiple quantiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
if dim is None:
dim = self._group_dim
out = self.apply(
self._obj.__class__.quantile,
shortcut=False,
q=q,
dim=dim,
interpolation=interpolation,
keep_attrs=keep_attrs,
)
if np.asarray(q, dtype=np.float64).ndim == 0:
out = out.drop("quantile")
return out
def reduce(
self, func, dim=None, axis=None, keep_attrs=None, shortcut=True, **kwargs
):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
dim : `...`, str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
            and 'axis' arguments can be supplied. If neither is supplied, then
            `func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim is None:
dim = self._group_dim
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
check_reduce_dims(dim, self.dims)
return self.apply(reduce_array, shortcut=shortcut)
ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)
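def _dataarray_groupby_sketch():
    # Illustrative usage sketch only (not part of the public API): shows the
    # typical entry points into the DataArrayGroupBy class defined above. The
    # variable names and example data are assumptions made for this sketch.
    import numpy as np
    import pandas as pd
    import xarray as xr
    da = xr.DataArray(
        np.arange(12.0),
        dims="time",
        coords={"time": pd.date_range("2000-01-01", periods=12, freq="M")},
    )
    grouped = da.groupby("time.month")                  # DataArrayGroupBy
    monthly_mean = grouped.mean()                       # dispatches to reduce()
    anomalies = grouped.apply(lambda x: x - x.mean())   # apply() -> _combine()
    return monthly_mean, anomalies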
class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):
def apply(self, func, args=(), shortcut=None, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
args : tuple, optional
Positional arguments to pass to `func`.
**kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset or DataArray
The result of splitting, applying and combining this dataset.
"""
# ignore shortcut if set (for now)
applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())
return self._combine(applied)
def _combine(self, applied):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
if coord is not None:
combined[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def reduce(self, func, dim=None, keep_attrs=None, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
dim : `...`, str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
            and 'axis' arguments can be supplied. If neither is supplied, then
            `func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim is None:
dim = self._group_dim
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
def reduce_dataset(ds):
return ds.reduce(func, dim, keep_attrs, **kwargs)
check_reduce_dims(dim, self.dims)
return self.apply(reduce_dataset)
def assign(self, **kwargs):
"""Assign data variables by group.
See also
--------
Dataset.assign
"""
return self.apply(lambda ds: ds.assign(**kwargs))
ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)
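def _dataset_groupby_sketch():
    # Illustrative usage sketch only (not part of the public API): the Dataset
    # analogue of the grouped operations above. Names and values are example
    # assumptions.
    import numpy as np
    import xarray as xr
    ds = xr.Dataset(
        {"values": ("x", np.arange(6.0))},
        coords={"letters": ("x", list("ababab"))},
    )
    grouped = ds.groupby("letters")                     # DatasetGroupBy
    means = grouped.mean()                              # injected reduce method
    shifted = grouped.apply(lambda g: g - g.mean())     # apply() -> _combine()
    return means, shifted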
|
apache-2.0
|
RomainBrault/scikit-learn
|
sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
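def _download_rcv1_sketch():
    # Illustrative helper, not part of the test: the test above is skipped when
    # RCV1 is not cached in data_home, so a one-off download such as this is
    # assumed to have been run first (scikit-learn caches under
    # ~/scikit_learn_data by default).
    return fetch_rcv1(download_if_missing=True)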
|
bsd-3-clause
|
sinhrks/seaborn
|
seaborn/palettes.py
|
21
|
43467
|
from __future__ import division
import colorsys
from itertools import cycle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from .external import husl
from .external.six import string_types
from .external.six.moves import range
from .utils import desaturate, set_hls_values
from .xkcd_rgb import xkcd_rgb
from .crayons import crayons
from .miscplot import palplot
SEABORN_PALETTES = dict(
deep=["#4C72B0", "#55A868", "#C44E52",
"#8172B2", "#CCB974", "#64B5CD"],
muted=["#4878CF", "#6ACC65", "#D65F5F",
"#B47CC7", "#C4AD66", "#77BEDB"],
pastel=["#92C6FF", "#97F0AA", "#FF9F9A",
"#D0BBFF", "#FFFEA3", "#B0E0E6"],
bright=["#003FFF", "#03ED3A", "#E8000B",
"#8A2BE2", "#FFC400", "#00D7FF"],
dark=["#001C7F", "#017517", "#8C0900",
"#7600A1", "#B8860B", "#006374"],
colorblind=["#0072B2", "#009E73", "#D55E00",
"#CC79A7", "#F0E442", "#56B4E9"]
)
class _ColorPalette(list):
"""Set the color palette in a with statement, otherwise be a list."""
def __enter__(self):
"""Open the context."""
from .rcmod import set_palette
self._orig_palette = color_palette()
set_palette(self)
return self
def __exit__(self, *args):
"""Close the context."""
from .rcmod import set_palette
set_palette(self._orig_palette)
def as_hex(self):
"""Return a color palette with hex codes instead of RGB values."""
hex = [mpl.colors.rgb2hex(rgb) for rgb in self]
return _ColorPalette(hex)
def color_palette(palette=None, n_colors=None, desat=None):
"""Return a list of colors defining a color palette.
    Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any named matplotlib palette, list of colors
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
    Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
(These options are mutually exclusive, but the resulting list of colors
can also be reversed).
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
palette: None, string, or sequence, optional
Name of palette or None to return current palette. If a sequence, input
colors are used but possibly cycled and desaturated.
n_colors : int, optional
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified. Named palettes default to 6 colors,
but grabbing the current palette or passing in a list of colors will
not change the number of colors unless this is specified. Asking for
more colors than exist in the palette will cause it to cycle.
desat : float, optional
Proportion to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette. Behaves like a list, but can be used as a context
manager and possesses an ``as_hex`` method to convert to hex color
codes.
See Also
--------
set_palette : Set the default color cycle for all plots.
set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the seaborn palettes.
Examples
--------
Show one of the "seaborn palettes", which have the same basic order of hues
as the default matplotlib color cycle but more attractive colors.
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.color_palette("muted"))
Use discrete values from one of the built-in matplotlib colormaps.
.. plot::
:context: close-figs
>>> sns.palplot(sns.color_palette("RdBu", n_colors=7))
Make a "dark" matplotlib sequential palette variant. (This can be good
when coloring multiple lines or points that correspond to an ordered
variable, where you don't want the lightest lines to be invisible).
.. plot::
:context: close-figs
>>> sns.palplot(sns.color_palette("Blues_d"))
Use a categorical matplotlib palette, add some desaturation. (This can be
good when making plots with large patches, which look best with dimmer
colors).
.. plot::
:context: close-figs
>>> sns.palplot(sns.color_palette("Set1", n_colors=8, desat=.5))
Use as a context manager:
.. plot::
:context: close-figs
>>> import numpy as np, matplotlib.pyplot as plt
>>> with sns.color_palette("husl", 8):
... _ = plt.plot(np.c_[np.zeros(8), np.arange(8)].T)
"""
if palette is None:
palette = mpl.rcParams["axes.color_cycle"]
if n_colors is None:
n_colors = len(palette)
elif not isinstance(palette, string_types):
palette = palette
if n_colors is None:
n_colors = len(palette)
else:
if n_colors is None:
n_colors = 6
if palette == "hls":
palette = hls_palette(n_colors)
elif palette == "husl":
palette = husl_palette(n_colors)
elif palette.lower() == "jet":
raise ValueError("No.")
elif palette in SEABORN_PALETTES:
palette = SEABORN_PALETTES[palette]
elif palette in dir(mpl.cm):
palette = mpl_palette(palette, n_colors)
elif palette[:-2] in dir(mpl.cm):
palette = mpl_palette(palette, n_colors)
else:
raise ValueError("%s is not a valid palette name" % palette)
if desat is not None:
palette = [desaturate(c, desat) for c in palette]
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in r, g, b tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette)
except ValueError:
raise ValueError("Could not generate a palette for %s" % str(palette))
return palette
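def _color_palette_sketch():
    # Minimal sketch (illustrative only) of the cycling and desaturation
    # behaviour implemented in color_palette() above; the palette names used
    # here are just examples.
    pal = color_palette("deep", n_colors=10)      # 6 base colors cycled up to 10
    assert len(pal) == 10 and pal[0] == pal[6]    # colors repeat after one cycle
    dimmed = color_palette("deep", desat=.5)      # same hues, half the saturation
    hexes = color_palette("husl", 8).as_hex()     # "#rrggbb" strings
    return pal, dimmed, hexes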
def hls_palette(n_colors=6, h=.01, l=.6, s=.65):
"""Get a set of evenly spaced colors in HLS hue space.
h, l, and s should be between 0 and 1
Parameters
----------
n_colors : int
number of colors in the palette
h : float
first hue
l : float
lightness
s : float
saturation
Returns
-------
palette : seaborn color palette
List-like object of colors as RGB tuples.
See Also
--------
    husl_palette : Make a palette using evenly spaced circular hues in the
HUSL system.
Examples
--------
Create a palette of 10 colors with the default parameters:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.hls_palette(10))
Create a palette of 10 colors that begins at a different hue value:
.. plot::
:context: close-figs
>>> sns.palplot(sns.hls_palette(10, h=.5))
Create a palette of 10 colors that are darker than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.hls_palette(10, l=.4))
Create a palette of 10 colors that are less saturated than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.hls_palette(10, s=.4))
"""
hues = np.linspace(0, 1, n_colors + 1)[:-1]
hues += h
hues %= 1
hues -= hues.astype(int)
palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]
return _ColorPalette(palette)
def husl_palette(n_colors=6, h=.01, s=.9, l=.65):
"""Get a set of evenly spaced colors in HUSL hue space.
h, s, and l should be between 0 and 1
Parameters
----------
n_colors : int
number of colors in the palette
h : float
first hue
s : float
saturation
l : float
lightness
Returns
-------
palette : seaborn color palette
List-like object of colors as RGB tuples.
See Also
--------
    hls_palette : Make a palette using evenly spaced circular hues in the
HSL system.
Examples
--------
Create a palette of 10 colors with the default parameters:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.husl_palette(10))
Create a palette of 10 colors that begins at a different hue value:
.. plot::
:context: close-figs
>>> sns.palplot(sns.husl_palette(10, h=.5))
Create a palette of 10 colors that are darker than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.husl_palette(10, l=.4))
Create a palette of 10 colors that are less saturated than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.husl_palette(10, s=.4))
"""
hues = np.linspace(0, 1, n_colors + 1)[:-1]
hues += h
hues %= 1
hues *= 359
s *= 99
l *= 99
palette = [husl.husl_to_rgb(h_i, s, l) for h_i in hues]
return _ColorPalette(palette)
def mpl_palette(name, n_colors=6):
"""Return discrete colors from a matplotlib palette.
Note that this handles the qualitative colorbrewer palettes
properly, although if you ask for more colors than a particular
qualitative palette can provide you will get fewer than you are
expecting. In contrast, asking for qualitative color brewer palettes
using :func:`color_palette` will return the expected number of colors,
but they will cycle.
If you are using the IPython notebook, you can also use the function
:func:`choose_colorbrewer_palette` to interactively select palettes.
Parameters
----------
name : string
Name of the palette. This should be a named matplotlib colormap.
n_colors : int
Number of discrete colors in the palette.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
Examples
--------
Create a qualitative colorbrewer palette with 8 colors:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.mpl_palette("Set2", 8))
Create a sequential colorbrewer palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.mpl_palette("Blues"))
Create a diverging palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.mpl_palette("seismic", 8))
Create a "dark" sequential palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.mpl_palette("GnBu_d"))
"""
brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12,
"Pastel1": 9, "Pastel2": 8,
"Set1": 9, "Set2": 8, "Set3": 12}
if name.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(name.replace("_d", "_r"), 2))
cmap = blend_palette(pal, n_colors, as_cmap=True)
else:
cmap = getattr(mpl.cm, name)
if name in brewer_qual_pals:
bins = np.linspace(0, 1, brewer_qual_pals[name])[:n_colors]
else:
bins = np.linspace(0, 1, n_colors + 2)[1:-1]
palette = list(map(tuple, cmap(bins)[:, :3]))
return _ColorPalette(palette)
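def _mpl_palette_sampling_sketch():
    # Illustrative sketch of the sampling rule above: qualitative colorbrewer
    # maps are read at their fixed anchor points, other colormaps are sampled
    # evenly away from the extremes, and "_d" names go through blend_palette().
    # The palette names are example choices.
    qualitative = mpl_palette("Set2", 4)   # first 4 of Set2's 8 anchors
    sequential = mpl_palette("Blues", 4)   # 4 evenly spaced interior samples
    darkened = mpl_palette("GnBu_d", 4)    # dark variant built by blending
    return qualitative, sequential, darkened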
def _color_to_rgb(color, input):
"""Add some more flexibility to color choices."""
if input == "hls":
color = colorsys.hls_to_rgb(*color)
elif input == "husl":
color = husl.husl_to_rgb(*color)
elif input == "xkcd":
color = xkcd_rgb[color]
return color
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
"""Make a sequential palette that blends from dark to ``color``.
This kind of palette is good for data that range between relatively
uninteresting low values and interesting high values.
The ``color`` parameter can be specified in a number of ways, including
all options for defining a color in matplotlib and several additional
color spaces that are handled by seaborn. You can also use the database
of named colors from the XKCD color survey.
If you are using the IPython notebook, you can also choose this palette
interactively with the :func:`choose_dark_palette` function.
Parameters
----------
color : base color for high values
hex, rgb-tuple, or html color name
n_colors : int, optional
number of colors in the palette
reverse : bool, optional
if True, reverse the direction of the blend
as_cmap : bool, optional
if True, return as a matplotlib colormap instead of list
    input : {'rgb', 'hls', 'husl', 'xkcd'}
Color space to interpret the input color. The first three options
apply to tuple inputs and the latter applies to string inputs.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
light_palette : Create a sequential palette with bright low values.
diverging_palette : Create a diverging palette with two colors.
Examples
--------
Generate a palette from an HTML color:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.dark_palette("purple"))
Generate a palette that decreases in lightness:
.. plot::
:context: close-figs
>>> sns.palplot(sns.dark_palette("seagreen", reverse=True))
Generate a palette from an HUSL-space seed:
.. plot::
:context: close-figs
>>> sns.palplot(sns.dark_palette((260, 75, 60), input="husl"))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.dark_palette("#2ecc71", as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
color = _color_to_rgb(color, input)
gray = "#222222"
colors = [color, gray] if reverse else [gray, color]
return blend_palette(colors, n_colors, as_cmap)
def light_palette(color, n_colors=6, reverse=False, as_cmap=False,
input="rgb"):
"""Make a sequential palette that blends from light to ``color``.
This kind of palette is good for data that range between relatively
uninteresting low values and interesting high values.
The ``color`` parameter can be specified in a number of ways, including
all options for defining a color in matplotlib and several additional
color spaces that are handled by seaborn. You can also use the database
of named colors from the XKCD color survey.
If you are using the IPython notebook, you can also choose this palette
interactively with the :func:`choose_light_palette` function.
Parameters
----------
color : base color for high values
hex code, html color name, or tuple in ``input`` space.
n_colors : int, optional
number of colors in the palette
reverse : bool, optional
if True, reverse the direction of the blend
as_cmap : bool, optional
if True, return as a matplotlib colormap instead of list
    input : {'rgb', 'hls', 'husl', 'xkcd'}
Color space to interpret the input color. The first three options
apply to tuple inputs and the latter applies to string inputs.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
diverging_palette : Create a diverging palette with two colors.
Examples
--------
Generate a palette from an HTML color:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.light_palette("purple"))
Generate a palette that increases in lightness:
.. plot::
:context: close-figs
>>> sns.palplot(sns.light_palette("seagreen", reverse=True))
Generate a palette from an HUSL-space seed:
.. plot::
:context: close-figs
>>> sns.palplot(sns.light_palette((260, 75, 60), input="husl"))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.light_palette("#2ecc71", as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
color = _color_to_rgb(color, input)
light = set_hls_values(color, l=.95)
colors = [color, light] if reverse else [light, color]
return blend_palette(colors, n_colors, as_cmap)
def _flat_palette(color, n_colors=6, reverse=False, as_cmap=False,
input="rgb"):
"""Make a sequential palette that blends from gray to ``color``.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
n_colors : int, optional
number of colors in the palette
reverse : bool, optional
if True, reverse the direction of the blend
as_cmap : bool, optional
if True, return as a matplotlib colormap instead of list
Returns
-------
palette : list or colormap
dark_palette : Create a sequential palette with dark low values.
"""
color = _color_to_rgb(color, input)
flat = desaturate(color, 0)
colors = [color, flat] if reverse else [flat, color]
return blend_palette(colors, n_colors, as_cmap)
def diverging_palette(h_neg, h_pos, s=75, l=50, sep=10, n=6, center="light",
as_cmap=False):
"""Make a diverging palette between two HUSL colors.
If you are using the IPython notebook, you can also choose this palette
interactively with the :func:`choose_diverging_palette` function.
Parameters
----------
h_neg, h_pos : float in [0, 359]
Anchor hues for negative and positive extents of the map.
s : float in [0, 100], optional
Anchor saturation for both extents of the map.
    l : float in [0, 100], optional
        Anchor lightness for both extents of the map.
    sep : int, optional
        Size of the intermediate (neutral) region; larger values widen the
        band of ``center``-colored values in the middle of the palette.
    n : int, optional
        Number of colors in the palette (if not returning a cmap)
center : {"light", "dark"}, optional
Whether the center of the palette is light or dark
as_cmap : bool, optional
If true, return a matplotlib colormap object rather than a
list of colors.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
dark_palette : Create a sequential palette with dark values.
light_palette : Create a sequential palette with light values.
Examples
--------
Generate a blue-white-red palette:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.diverging_palette(240, 10, n=9))
Generate a brighter green-white-purple palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.diverging_palette(150, 275, s=80, l=55, n=9))
Generate a blue-black-red palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.diverging_palette(250, 15, s=75, l=40,
... n=9, center="dark"))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.diverging_palette(220, 20, sep=20, as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
palfunc = dark_palette if center == "dark" else light_palette
neg = palfunc((h_neg, s, l), 128 - (sep / 2), reverse=True, input="husl")
pos = palfunc((h_pos, s, l), 128 - (sep / 2), input="husl")
midpoint = dict(light=[(.95, .95, .95, 1.)],
dark=[(.133, .133, .133, 1.)])[center]
mid = midpoint * sep
pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)
return pal
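def _diverging_palette_sketch():
    # Minimal sketch (illustrative hue/sep values) of the construction above:
    # two sequential ramps meet in a neutral band whose relative width is set
    # by ``sep``.
    narrow_mid = diverging_palette(240, 10, sep=2, n=9)    # thin neutral band
    wide_mid = diverging_palette(240, 10, sep=80, n=9)     # broad neutral band
    dark_mid = diverging_palette(240, 10, n=9, center="dark")
    as_colormap = diverging_palette(220, 20, as_cmap=True)
    return narrow_mid, wide_mid, dark_mid, as_colormap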
def blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"):
"""Make a palette that blends between a list of colors.
Parameters
----------
colors : sequence of colors in various formats interpreted by ``input``
hex code, html color name, or tuple in ``input`` space.
n_colors : int, optional
Number of colors in the palette.
as_cmap : bool, optional
If True, return as a matplotlib colormap instead of list.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
"""
colors = [_color_to_rgb(color, input) for color in colors]
name = "blend"
pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)
if not as_cmap:
pal = _ColorPalette(pal(np.linspace(0, 1, n_colors)))
return pal
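def _blend_palette_sketch():
    # Illustrative sketch only: blend_palette() interpolates through any list
    # of colors, which is also how dark_palette()/light_palette() above are
    # built (as two-color blends). The color choices here are examples.
    ramp = blend_palette(["#2ecc71", "white", "#e74c3c"], n_colors=7)
    cmap = blend_palette(["navy", "white", "firebrick"], as_cmap=True)
    husl_ramp = blend_palette([(260, 75, 30), (260, 75, 80)], input="husl")
    return ramp, cmap, husl_ramp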
def xkcd_palette(colors):
"""Make a palette with color names from the xkcd color survey.
See xkcd for the full list of colors: http://xkcd.com/color/rgb/
This is just a simple wrapper around the ``seaborn.xkcd_rgb`` dictionary.
Parameters
----------
colors : list of strings
List of keys in the ``seaborn.xkcd_rgb`` dictionary.
Returns
-------
palette : seaborn color palette
Returns the list of colors as RGB tuples in an object that behaves like
other seaborn color palettes.
See Also
--------
crayon_palette : Make a palette with Crayola crayon colors.
"""
palette = [xkcd_rgb[name] for name in colors]
return color_palette(palette, len(palette))
def crayon_palette(colors):
"""Make a palette with color names from Crayola crayons.
Colors are taken from here:
http://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors
This is just a simple wrapper around the ``seaborn.crayons`` dictionary.
Parameters
----------
colors : list of strings
List of keys in the ``seaborn.crayons`` dictionary.
Returns
-------
palette : seaborn color palette
Returns the list of colors as rgb tuples in an object that behaves like
other seaborn color palettes.
See Also
--------
xkcd_palette : Make a palette with named colors from the XKCD color survey.
"""
palette = [crayons[name] for name in colors]
return color_palette(palette, len(palette))
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,
light=.85, dark=.15, reverse=False, as_cmap=False):
"""Make a sequential palette from the cubehelix system.
This produces a colormap with linearly-decreasing (or increasing)
brightness. That means that information will be preserved if printed to
black and white or viewed by someone who is colorblind. "cubehelix" is
    also available as a matplotlib-based palette, but this function gives the
user more control over the look of the palette and has a different set of
defaults.
Parameters
----------
n_colors : int
Number of colors in the palette.
start : float, 0 <= start <= 3
The hue at the start of the helix.
rot : float
Rotations around the hue wheel over the range of the palette.
gamma : float 0 <= gamma
Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)
colors.
hue : float, 0 <= hue <= 1
Saturation of the colors.
dark : float 0 <= dark <= 1
Intensity of the darkest color in the palette.
light : float 0 <= light <= 1
Intensity of the lightest color in the palette.
reverse : bool
If True, the palette will go from dark to light.
as_cmap : bool
If True, return a matplotlib colormap instead of a list of colors.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
choose_cubehelix_palette : Launch an interactive widget to select cubehelix
palette parameters.
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
References
----------
Green, D. A. (2011). "A colour scheme for the display of astronomical
intensity images". Bulletin of the Astromical Society of India, Vol. 39,
p. 289-295.
Examples
--------
Generate the default palette:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.cubehelix_palette())
Rotate backwards from the same starting location:
.. plot::
:context: close-figs
>>> sns.palplot(sns.cubehelix_palette(rot=-.4))
Use a different starting point and shorter rotation:
.. plot::
:context: close-figs
>>> sns.palplot(sns.cubehelix_palette(start=2.8, rot=.1))
Reverse the direction of the lightness ramp:
.. plot::
:context: close-figs
>>> sns.palplot(sns.cubehelix_palette(reverse=True))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.cubehelix_palette(as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
Use the full lightness range:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
cdict = mpl._cm.cubehelix(gamma, start, rot, hue)
cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict)
x = np.linspace(light, dark, n_colors)
pal = cmap(x)[:, :3].tolist()
if reverse:
pal = pal[::-1]
if as_cmap:
x_256 = np.linspace(light, dark, 256)
if reverse:
x_256 = x_256[::-1]
pal_256 = cmap(x_256)
cmap = mpl.colors.ListedColormap(pal_256)
return cmap
else:
return _ColorPalette(pal)
def set_color_codes(palette="deep"):
"""Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like "b" or "g"
are interpreted by matplotlib in subsequent plots.
Parameters
----------
palette : {deep, muted, pastel, dark, bright, colorblind}
Named seaborn palette to use as the source of colors.
See Also
--------
set : Color codes can be set through the high-level seaborn style
manager.
set_palette : Color codes can also be set through the function that
sets the matplotlib color cycle.
Examples
--------
Map matplotlib color codes to the default seaborn palette.
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns; sns.set()
>>> sns.set_color_codes()
>>> _ = plt.plot([0, 1], color="r")
Use a different seaborn palette.
.. plot::
:context: close-figs
>>> sns.set_color_codes("dark")
>>> _ = plt.plot([0, 1], color="g")
>>> _ = plt.plot([0, 2], color="m")
"""
if palette == "reset":
        colors = [(0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75),
                  (.75, .75, 0.), (0., .75, .75), (0., 0., 0.)]
else:
colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]
for code, color in zip("bgrmyck", colors):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
def _init_mutable_colormap():
"""Create a matplotlib colormap that will be updated by the widgets."""
greys = color_palette("Greys", 256)
cmap = LinearSegmentedColormap.from_list("interactive", greys)
cmap._init()
cmap._set_extremes()
return cmap
def _update_lut(cmap, colors):
"""Change the LUT values in a matplotlib colormap in-place."""
cmap._lut[:256] = colors
cmap._set_extremes()
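def _mutable_colormap_sketch():
    # Minimal sketch (illustrative only) of the private widget plumbing above:
    # a 256-entry colormap is created once and its lookup table is overwritten
    # in place whenever an interactive control changes.
    cmap = _init_mutable_colormap()
    colors = color_palette("Blues", 256)
    _update_lut(cmap, np.c_[colors, np.ones(256)])  # RGB columns plus alpha
    return cmap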
def _show_cmap(cmap):
"""Show a continuous matplotlib colormap."""
from .rcmod import axes_style # Avoid circular import
with axes_style("white"):
f, ax = plt.subplots(figsize=(8.25, .75))
ax.set(xticks=[], yticks=[])
x = np.linspace(0, 1, 256)[np.newaxis, :]
ax.pcolormesh(x, cmap=cmap)
def choose_colorbrewer_palette(data_type, as_cmap=False):
"""Select a palette from the ColorBrewer set.
These palettes are built into matplotlib and can be used by name in
many seaborn functions, or by passing the object returned by this function.
Parameters
----------
data_type : {'sequential', 'diverging', 'qualitative'}
This describes the kind of data you want to visualize. See the seaborn
color palette docs for more information about how to choose this value.
        Note that you can pass substrings (e.g. 'q' for 'qualitative').
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
diverging_palette : Create a diverging palette from selected colors.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
from IPython.html.widgets import interact, FloatSliderWidget
if data_type.startswith("q") and as_cmap:
raise ValueError("Qualitative palettes cannot be colormaps.")
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if data_type.startswith("s"):
opts = ["Greys", "Reds", "Greens", "Blues", "Oranges", "Purples",
"BuGn", "BuPu", "GnBu", "OrRd", "PuBu", "PuRd", "RdPu", "YlGn",
"PuBuGn", "YlGnBu", "YlOrBr", "YlOrRd"]
variants = ["regular", "reverse", "dark"]
@interact
def choose_sequential(name=opts, n=(2, 18),
desat=FloatSliderWidget(min=0, max=1, value=1),
variant=variants):
if variant == "reverse":
name += "_r"
elif variant == "dark":
name += "_d"
if as_cmap:
colors = color_palette(name, 256, desat)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = color_palette(name, n, desat)
palplot(pal)
elif data_type.startswith("d"):
opts = ["RdBu", "RdGy", "PRGn", "PiYG", "BrBG",
"RdYlBu", "RdYlGn", "Spectral"]
variants = ["regular", "reverse"]
@interact
def choose_diverging(name=opts, n=(2, 16),
desat=FloatSliderWidget(min=0, max=1, value=1),
variant=variants):
if variant == "reverse":
name += "_r"
if as_cmap:
colors = color_palette(name, 256, desat)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = color_palette(name, n, desat)
palplot(pal)
elif data_type.startswith("q"):
opts = ["Set1", "Set2", "Set3", "Paired", "Accent",
"Pastel1", "Pastel2", "Dark2"]
@interact
def choose_qualitative(name=opts, n=(2, 16),
desat=FloatSliderWidget(min=0, max=1, value=1)):
pal[:] = color_palette(name, n, desat)
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_dark_palette(input="husl", as_cmap=False):
"""Launch an interactive widget to create a dark sequential palette.
This corresponds with the :func:`dark_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
input : {'husl', 'hls', 'rgb'}
Color space for defining the seed value. Note that the default is
different than the default input for :func:`dark_palette`.
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
from IPython.html.widgets import interact
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if input == "rgb":
@interact
def choose_dark_palette_rgb(r=(0., 1.),
g=(0., 1.),
b=(0., 1.),
n=(3, 17)):
color = r, g, b
if as_cmap:
colors = dark_palette(color, 256, input="rgb")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="rgb")
palplot(pal)
elif input == "hls":
@interact
def choose_dark_palette_hls(h=(0., 1.),
l=(0., 1.),
s=(0., 1.),
n=(3, 17)):
color = h, l, s
if as_cmap:
colors = dark_palette(color, 256, input="hls")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="hls")
palplot(pal)
elif input == "husl":
@interact
def choose_dark_palette_husl(h=(0, 359),
s=(0, 99),
l=(0, 99),
n=(3, 17)):
color = h, s, l
if as_cmap:
colors = dark_palette(color, 256, input="husl")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="husl")
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_light_palette(input="husl", as_cmap=False):
"""Launch an interactive widget to create a light sequential palette.
This corresponds with the :func:`light_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
input : {'husl', 'hls', 'rgb'}
Color space for defining the seed value. Note that the default is
different than the default input for :func:`light_palette`.
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
light_palette : Create a sequential palette with bright low values.
dark_palette : Create a sequential palette with dark low values.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
from IPython.html.widgets import interact
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if input == "rgb":
@interact
def choose_light_palette_rgb(r=(0., 1.),
g=(0., 1.),
b=(0., 1.),
n=(3, 17)):
color = r, g, b
if as_cmap:
colors = light_palette(color, 256, input="rgb")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="rgb")
palplot(pal)
elif input == "hls":
@interact
def choose_light_palette_hls(h=(0., 1.),
l=(0., 1.),
s=(0., 1.),
n=(3, 17)):
color = h, l, s
if as_cmap:
colors = light_palette(color, 256, input="hls")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="hls")
palplot(pal)
elif input == "husl":
@interact
def choose_light_palette_husl(h=(0, 359),
s=(0, 99),
l=(0, 99),
n=(3, 17)):
color = h, s, l
if as_cmap:
colors = light_palette(color, 256, input="husl")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="husl")
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_diverging_palette(as_cmap=False):
"""Launch an interactive widget to choose a diverging color palette.
This corresponds with the :func:`diverging_palette` function. This kind
of palette is good for data that range between interesting low values
and interesting high values with a meaningful midpoint. (For example,
change scores relative to some baseline value).
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
diverging_palette : Create a diverging color palette or colormap.
choose_colorbrewer_palette : Interactively choose palettes from the
colorbrewer set, including diverging palettes.
"""
from IPython.html.widgets import interact, IntSliderWidget
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
@interact
def choose_diverging_palette(h_neg=IntSliderWidget(min=0,
max=359,
value=220),
h_pos=IntSliderWidget(min=0,
max=359,
value=10),
s=IntSliderWidget(min=0, max=99, value=74),
l=IntSliderWidget(min=0, max=99, value=50),
sep=IntSliderWidget(min=1, max=50, value=10),
n=(2, 16),
center=["light", "dark"]):
if as_cmap:
colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_cubehelix_palette(as_cmap=False):
"""Launch an interactive widget to create a sequential cubehelix palette.
This corresponds with the :func:`cubehelix_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values. The cubehelix system allows the
palette to have more hue variance across the range, which can be helpful
for distinguishing a wider range of values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
from IPython.html.widgets import (interact,
FloatSliderWidget, IntSliderWidget)
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
@interact
def choose_cubehelix(n_colors=IntSliderWidget(min=2, max=16, value=9),
start=FloatSliderWidget(min=0, max=3, value=0),
rot=FloatSliderWidget(min=-1, max=1, value=.4),
gamma=FloatSliderWidget(min=0, max=5, value=1),
hue=FloatSliderWidget(min=0, max=1, value=.8),
light=FloatSliderWidget(min=0, max=1, value=.85),
dark=FloatSliderWidget(min=0, max=1, value=.15),
reverse=False):
if as_cmap:
colors = cubehelix_palette(256, start, rot, gamma,
hue, light, dark, reverse)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = cubehelix_palette(n_colors, start, rot, gamma,
hue, light, dark, reverse)
palplot(pal)
if as_cmap:
return cmap
return pal
|
bsd-3-clause
|
mrklees/cy-automation-library
|
cyautomation/cyschoolhouse/student.py
|
2
|
7352
|
import os
from time import sleep
import numpy as np
import pandas as pd
from selenium.webdriver.support.ui import Select
from . import simple_cysh as cysh
from .cyschoolhousesuite import get_driver, open_cyschoolhouse
from .sendemail import send_email
def _sf_api_approach():
""" This is how the task would be accomplished via salesforce API, if only I could edit the fields:
"""
df = pd.read_excel(r'C:\Users\City_Year\Downloads\New Students for cyschoolhouse(1-11).xlsx')
    school_df = cysh.get_object_df('Account', ['Id', 'Name'])
df = df.merge(school_df, how='left', left_on='School', right_on='Name')
drop_ids = []
for index, row in df.iterrows():
search_result = cysh.sf.query(f"SELECT Id FROM Student__c WHERE Local_Student_ID__c = '{row['Student CPS ID']}'")
if len(search_result['records']) > 0:
drop_ids.append(row['Student CPS ID'])
df = df.loc[~df['Student CPS ID'].isin(drop_ids)]
for index, row in df.iterrows():
stu_dict = {
'Local_Student_ID__c':str(row['Student CPS ID']),
'School__c':row['Id'],
'Name':(row['Student First Name'] + ' ' + row['Student Last Name']),
'Student_Last_Name__c':row['Student Last Name'],
'Grade__c':str(row['Student Grade Level']),
#'School_Name__c':row['Name_y'],
}
return None
def import_parameters(xlsx_path, enrollment_date):
"""Imports input data from xlsx
`enrollment_date` in the format 'MM/DD/YYYY'
"""
df = pd.read_excel(xlsx_path, converters={'*REQ* Grade':int})
column_rename = {
'Student CPS ID':'*REQ* Local Student ID',
'Student First Name':'*REQ* First Name',
'Student Last Name':'*REQ* Last Name',
'Student Grade Level':'*REQ* Grade',
}
df.rename(columns=column_rename, inplace=True)
df["*REQ* Student Id"] = df['*REQ* Local Student ID']
df["*REQ* Type"] = 'Student'
if "*REQ* Entry Date" not in df.columns:
df["*REQ* Entry Date"] = enrollment_date
for col in ["Date of Birth", "Gender", "Ethnicity", "Disability Flag", "ELL"]:
if col not in df.columns:
df[col] = np.nan
col_order = [
'School',
'*REQ* Student Id',
'*REQ* Local Student ID',
'*REQ* First Name',
'*REQ* Last Name',
'*REQ* Grade',
'Date of Birth',
'Gender',
'Ethnicity',
'Disability Flag',
'ELL',
'*REQ* Entry Date',
'*REQ* Type',
]
df = df[col_order]
return df
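def _import_parameters_sketch():
    """ Illustrative-only sketch of how the loader above is called. The file
    name and enrollment date are example assumptions; the real workbook comes
    from the school teams.
    """
    xlsx_path = os.path.join('input_files', 'New Students for cyschoolhouse.xlsx')
    params = import_parameters(xlsx_path, enrollment_date='09/04/2018')
    # `params` now holds one row per student with the '*REQ* ...' columns the
    # cyschoolhouse CSV loader expects, plus 'School' for splitting by site.
    return params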
def remove_extant_students(df, sf=cysh.sf):
local_student_id_col = '*REQ* Local Student ID'
drop_ids = []
for index, row in df.iterrows():
search_result = sf.query(f"SELECT Id FROM Student__c WHERE Local_Student_ID__c = '{row[local_student_id_col]}'")
if len(search_result['records']) > 0:
drop_ids.append(row[local_student_id_col])
del search_result
df = df.loc[~df[local_student_id_col].isin(drop_ids)]
return df
def input_file(driver, path_to_csv):
driver.find_element_by_xpath('//*[@id="selectedFile"]').send_keys(path_to_csv)
driver.find_element_by_xpath('//*[@id="j_id0:j_id42"]/div[3]/div[1]/div[6]/input[2]').click()
def insert_data(driver):
driver.find_element_by_xpath('//*[@id="startBatchButton"]').click()
def upload_all(enrollment_date, xlsx_dir=os.path.join(os.path.dirname(__file__),'input_files'), xlsx_name='New Students for cyschoolhouse.xlsx', sf=cysh.sf):
""" Runs the entire student upload process.
"""
xlsx_path = os.path.join(xlsx_dir, xlsx_name)
params = import_parameters(xlsx_path, enrollment_date)
params = remove_extant_students(params)
setup_df = cysh.get_object_df('Setup__c', ['Id', 'School__c'], rename_id=True, rename_name=True)
school_df = cysh.get_object_df('Account', ['Id', 'Name'])
setup_df = setup_df.merge(school_df, how='left', left_on='School__c', right_on='Id'); del school_df
setup_df = setup_df.loc[~setup_df['Id'].isnull()]
if len(params) == 0:
print(f'No new students to upload.')
return None
driver = get_driver()
open_cyschoolhouse(driver)
for school_name in params['School'].unique():
# Write csv
path_to_csv = os.path.join(xlsx_dir, f"SY19 New Students for CYSH - {school_name}.csv")
df_csv = params.loc[params["School"]==school_name].copy()
df_csv.drop(["School"], axis=1, inplace=True)
df_csv.to_csv(path_to_csv, index=False, date_format='%m/%d/%Y')
        # Navigate to student enrollment page
setup_id = setup_df.loc[setup_df['Name']==school_name, 'Setup__c'].values[0]
driver.get(f'https://c.na30.visual.force.com/apex/CT_core_LoadCsvData_v2?setupId={setup_id}&OldSideBar=true&type=Student')
sleep(2)
input_file(driver, path_to_csv)
sleep(2)
insert_data(driver)
sleep(2)
# Publish
# Seems to work, but not completely sure if script
# pauses until upload is complete, both for the "Insert Data"
# phase, and the "Publish Staff/Student Records" phase.
driver.get(f'https://c.na30.visual.force.com/apex/schoolsetup_staff?setupId={setup_id}')
driver.find_element_by_css_selector('input.red_btn').click()
sleep(3)
print(f"Uploaded {len(df_csv)} students")
os.remove(path_to_csv)
# Email school manager to inform of successful student upload
school_df = cysh.get_object_df('Account', ['Id', 'Name'])
school_df.rename(columns={'Name':'School', 'Id':'Organization__c'}, inplace=True)
staff_df = cysh.get_object_df(
'Staff__c',
['Name', 'Email__c', 'Role__c', 'Organization__c'],
where=f"Organization__c IN ({str(school_df['Organization__c'].tolist())[1:-1]})"
)
staff_df = staff_df.merge(school_df, how='left', on='Organization__c')
staff_df = staff_df.loc[staff_df['Role__c'].str.lower()=='impact manager']
to_addrs = staff_df.loc[staff_df['School'].isin(params['School']), 'Email__c'].tolist()
send_email(
to_addrs = list(set(to_addrs)),
subject = 'New student(s) now in cyschoolhouse',
body = 'The student(s) you submitted have been successfully uploaded to cyschoolhouse.'
)
driver.quit()
def update_student_External_Id(prefix='CPS_', sf=cysh.sf):
""" Updates 'External_Id__c' field to 'CPS_' + 'Local_Student_ID__c'. Triggers external integrations at HQ.
"""
school_df = cysh.get_object_df('Account', ['Id', 'Name'])
student_df = cysh.get_object_df(
'Student__c',
['Id', 'Local_Student_ID__c', 'External_Id__c'],
where=f"School__c IN ({str(school_df['Id'].tolist())[1:-1]})"
)
if sum(student_df['Local_Student_ID__c'].duplicated()) > 0:
raise ValueError(f'Error: Duplicates exist on Local_Student_ID__c.')
student_df = student_df.loc[student_df['External_Id__c'].isnull() & (student_df['Local_Student_ID__c'].str.len()==8)]
if len(student_df) == 0:
print(f'No students to fix IDs for.')
return None
results = []
for index, row in student_df.iterrows():
result = sf.Student__c.update(row['Id'],{'External_Id__c':(prefix + row['Local_Student_ID__c'])})
results.append(result)
return results
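def _upload_workflow_sketch():
    """ Illustrative-only driver showing how the pieces above are normally run
    together; the enrollment date is an assumed example value.
    """
    upload_all(enrollment_date='09/04/2018')
    return update_student_External_Id()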
|
gpl-3.0
|
CforED/Machine-Learning
|
examples/svm/plot_custom_kernel.py
|
171
|
1546
|
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
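def _check_custom_kernel():
    # Quick illustrative check, separate from the original example: for single
    # 2-D points x and y the kernel above reduces to 2*x[0]*y[0] + x[1]*y[1].
    x = np.array([[1.0, 2.0]])
    y = np.array([[3.0, 4.0]])
    assert my_kernel(x, y)[0, 0] == 2 * 1 * 3 + 2 * 4
    return my_kernel(x, y)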
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/io/clipboards.py
|
1
|
4334
|
""" io on the clipboard """
from io import StringIO
import warnings
from pandas.core.dtypes.generic import ABCDataFrame
from pandas import get_option, option_context
def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
"""
encoding = kwargs.pop('encoding', 'utf-8')
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise NotImplementedError(
'reading from clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_get
from pandas.io.parsers import read_csv
text = clipboard_get()
# Try to decode (if needed, as "text" might already be a string here).
try:
text = text.decode(kwargs.get('encoding')
or get_option('display.encoding'))
except AttributeError:
pass
# Excel copies into clipboard with \t separation
    # inspect no more than the first 10 lines; if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_csv
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = {x.lstrip().count('\t') for x in lines}
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = '\t'
# Edge case where sep is specified to be None, return to default
if sep is None and kwargs.get('delim_whitespace') is None:
sep = r'\s+'
# Regex separator currently only works with python engine.
# Default to python if separator is multi-character (regex)
if len(sep) > 1 and kwargs.get('engine') is None:
kwargs['engine'] = 'python'
elif len(sep) > 1 and kwargs.get('engine') == 'c':
warnings.warn('read_clipboard with regex separator does not work'
' properly with c engine')
return read_csv(StringIO(text), sep=sep, **kwargs)
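def _excel_detection_sketch():
    # Illustrative sketch only: the same tab-counting heuristic as above,
    # applied to a literal string instead of real clipboard contents.
    text = 'a\tb\n0\t1\n2\t3\n'
    lines = text[:10000].split('\n')[:-1][:10]
    counts = {x.lstrip().count('\t') for x in lines}
    # Every inspected line carries the same non-zero number of tabs, so the
    # paste is treated as Excel output and parsed with sep='\t'.
    return len(lines) > 1 and len(counts) == 1 and counts.pop() != 0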
def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with PyQt4 modules)
- Windows:
- OS X:
"""
encoding = kwargs.pop('encoding', 'utf-8')
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise ValueError('clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
text = buf.getvalue()
clipboard_set(text)
return
except TypeError:
warnings.warn('to_clipboard in excel mode requires a single '
'character separator.')
elif sep is not None:
warnings.warn('to_clipboard with excel=False ignores the sep argument')
if isinstance(obj, ABCDataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
|
bsd-3-clause
|
lvsoft/HiBench
|
bin/report_gen_plot.py
|
22
|
5011
|
#!/usr/bin/env python
#coding: utf-8
import sys, os, re
from pprint import pprint
from collections import defaultdict, namedtuple
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
RecordRaw=namedtuple("RecordRaw", "type durtation data_size throughput_total throughput_per_node")
Record=namedtuple("Record", "type language durtation data_size throughput_total throughput_per_node")
def human_readable_size(n):
"convert number into human readable string"
if n<1000: return str(n)
if n<800000: return "%.3fK" % (n/1000.0)
if n<800000000: return "%.3fM" % (n/1000000.0)
if n<800000000000: return "%.3fG" % (n/1000000000.0)
return "%.3fT" % (n/1000000000000.0)
def group_by_type(datas):
groups = defaultdict(dict)
for i in datas:
words = re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', i.type).split()
prefix = words[0].lower()
suffix = "_".join([x.lower() for x in words[1:]])
groups[suffix][prefix] = Record(type = "".join(words[1:]),
language = prefix,
                                        duration = i.duration,
data_size = i.data_size,
throughput_total = i.throughput_total,
throughput_per_node = i.throughput_per_node
)
return dict(groups)
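# Example of the CamelCase split above (hypothetical input): a report type such as
# "ScalaSort" is split into ["Scala", "Sort"], so the language prefix becomes
# "scala" and the workload key of the outer dict becomes "sort".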
def report_plot(fn):
if not os.path.isfile(fn):
print "Failed to find `sparkbench.report`"
sys.exit(1)
with open(fn) as f:
data = [x.split() for x in f.readlines()[1:] if x.strip() and not x.strip().startswith('#')]
pprint(data, width=300)
groups = group_by_type([RecordRaw(type = x[0],
data_size = int(x[3]),
                                      duration = float(x[4]),
throughput_total = int(x[5]) / 1024.0 / 1024,
throughput_per_node = int(x[6]) / 1024.0 /1024
) for x in data])
#print groups
base_dir = os.path.dirname(fn)
plot(groups, "Seconds of durtations (Less is better)", "Seconds", "durtation", os.path.join(base_dir, "durtation.png"))
# plot(groups, "Throughput in total (Higher is better)", "MB/s", "throughput_total", os.path.join(base_dir, "throughput_total.png"))
# plot(groups, "Throughput per node (Higher is better)", "MB/s", "throughput_per_node", os.path.join(base_dir, "throughput_per_node.png"))
def plot(groups, title="Duration in seconds", ylabel="Seconds", value_field="duration", fig_fn="foo.png"):
# plot it
keys = groups.keys()
    languages = sorted(reduce(lambda x, y: x.union(y), [set(groups[x][y].language for y in groups[x]) for x in groups]))
width = 0.15
rects = []
fig = plt.figure()
ax = plt.axes()
    bar_colors = 'rgbcymw'  # one fill color per language
# NCURVES=10
# curves = [np.random.random(20) for i in range(NCURVES)]
# values = range(NCURVES)
# jet = colors.Colormap('jet')
# cNorm = colors.Normalize(vmin=0, vmax=values[-1])
# scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
patterns = ('-', '+', 'x', '\\', '/', '*', '.', 'O')
for idx, lang in enumerate(languages):
rects.append(ax.bar([x + width * (idx + 1) for x in range(len(keys))], # x index
                            [getattr(groups[x][lang], value_field) if x in groups and lang in groups[x] else 0 for x in keys], # value
width,
                            color = bar_colors[idx],
hatch = patterns[idx]
) # width
)
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Arial')
label.set_fontsize(24)
ax.set_ylabel(ylabel, fontname="Arial", size="32")
ax.set_title(title, fontname="Arial", size="44")
    x_axis_offset = len(languages) * width / 2.0
ax.set_xticks([(x + width + x_axis_offset) for x in range(len(keys))])
    ax.set_xticklabels(["%s \n@%s" % (x, human_readable_size(list(groups[x].values())[0].data_size)) for x in keys])
ax.grid(True)
ax.legend([x[0] for x in rects],
languages)
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d' % int(height),
ha='center', va='bottom')
# [autolabel(x) for x in rects]
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
plt.savefig(fig_fn, dpi=100)
if __name__ == "__main__":
try:
default_report_fn = sys.argv[1]
    except IndexError:
default_report_fn = os.path.join(os.path.dirname(__file__), "..", "sparkbench.report")
report_plot(default_report_fn)
|
apache-2.0
|
IssamLaradji/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
330
|
3019
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
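# Optional quantitative comparison (not part of the original example): mean squared
# error of each estimator's reconstruction of the lower face halves.
for name in sorted(ESTIMATORS):
    print("%s: reconstruction MSE = %.4f"
          % (name, np.mean((y_test_predict[name] - y_test) ** 2)))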
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
|
bsd-3-clause
|
bikong2/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
284
|
3265
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
nrhine1/scikit-learn
|
sklearn/linear_model/tests/test_passive_aggressive.py
|
169
|
8809
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
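# The update in MyPassiveAggressive.fit follows the classic Passive-Aggressive
# rules (Crammer et al., 2006):
#   PA-I  ("hinge" / "epsilon_insensitive"):  step = min(C, loss / ||x||^2)
#   PA-II ("squared_hinge" / "squared_epsilon_insensitive"):
#         step = loss / (||x||^2 + 1 / (2C))
# with the step signed by the label (classification) or by sign(y - p) (regression).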
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
    # now the hyperplane should rotate clockwise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
|
bsd-3-clause
|
PatrickOReilly/scikit-learn
|
examples/decomposition/plot_pca_3d.py
|
354
|
2432
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
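    # pca_score holds the explained-variance ratio of each component and V the
    # principal directions; note that the variance-scaled axes computed on the
    # next line are immediately overwritten by the fixed-length 3 * V.T scaling.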
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
|
bsd-3-clause
|
jblackburne/scikit-learn
|
sklearn/ensemble/tests/test_partial_dependence.py
|
365
|
6996
|
"""
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
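# In the tests below, the Boston housing data exercises the regression path and
# the iris data (three classes) exercises the multi-class classification path.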
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependency_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
|
bsd-3-clause
|
aewhatley/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
142
|
6276
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
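# BernoulliRBM models visible units on the [0, 1] interval, so the digits data is
# shifted and rescaled into that range before any of the tests run.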
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
    # Check that we don't get NaNs when sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|