repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
robbymeals/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
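The module above is only a deprecation shim: it emits a `DeprecationWarning` and re-exports everything from the package root. A minimal sketch of the recommended import path (toy labels and scores, not from the file):

```python
# Import directly from sklearn.metrics instead of the deprecated
# sklearn.metrics.metrics module.
from sklearn.metrics import accuracy_score, roc_auc_score

y_true = [0, 1, 1, 0]
y_score = [0.1, 0.8, 0.7, 0.3]
print(roc_auc_score(y_true, y_score))        # 1.0: every positive outranks every negative
print(accuracy_score(y_true, [0, 1, 1, 1]))  # 0.75: three of four labels match
```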
dsm054/pandas | pandas/tests/indexing/test_indexing_slow.py | 2 | 3775 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas.core.api import DataFrame, MultiIndex, Series
import pandas.util.testing as tm
class TestIndexingSlow(object):
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_multiindex_get_loc(self): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
from numpy.random import randint, choice, randn
cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
def validate(mi, df, key):
mask = np.ones(len(df)).astype('bool')
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[:i + 1] not in mi.index
continue
assert key[:i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
right.drop(cols[:i + 1], axis=1, inplace=True)
right.set_index(cols[i + 1:-1], inplace=True)
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
else: # full key
right.set_index(cols[:-1], inplace=True)
if len(right) == 1: # single hit
right = Series(right['jolia'].values,
name=right.index[0],
index=['jolia'])
tm.assert_series_equal(mi.loc[key[:i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
def loop(mi, df, keys):
for key in keys:
validate(mi, df, key)
n, m = 1000, 50
vals = [randint(0, 10, n), choice(
list('abcdefghij'), n), choice(
pd.date_range('20141009', periods=10).tolist(), n), choice(
list('ZYXWVUTSRQ'), n), randn(n)]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [randint(0, 11, m), choice(
list('abcdefghijk'), m), choice(
pd.date_range('20141009', periods=11).tolist(), m), choice(
list('ZYXWVUTSRQP'), m)]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[::n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
for frame in a, b:
for i in range(5): # lexsort depth
df = frame.copy() if i == 0 else frame.sort_values(
by=cols[:i])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
@pytest.mark.slow
def test_large_dataframe_indexing(self):
# GH10692
result = DataFrame({'x': range(10 ** 6)}, dtype='int64')
result.loc[len(result)] = len(result) + 1
expected = DataFrame({'x': range(10 ** 6 + 1)}, dtype='int64')
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_large_mi_dataframe_indexing(self):
# GH10645
result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
assert (not (10 ** 6, 0) in result)
| bsd-3-clause |
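The `validate` helper in the test above checks that a partial key into a `MultiIndex` returns the remaining levels, while a full key returns the single matching row. A small self-contained sketch of that behaviour (column names invented for illustration):

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'y', 'x'], 'v': [10., 20., 30.]})
mi = df.set_index(['a', 'b']).sort_index()  # sorted, so the lexsort depth covers both levels

print(mi.loc[1])         # partial key -> frame indexed by the remaining level 'b'
print(mi.loc[(1, 'x')])  # full key -> the single matching row
```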
evanl/vesa_tough_comparison | vesa/vesa_v02_13/vesa_reading_functions.py | 2 | 12056 | import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import read_eclipse as re
import eclipse_cells as ec
from matplotlib import cm
class Cell:
def __init__(self, x, y, z_top, z_bot, p_init):
self.x_center = x
self.y_center = y
self.z_top = z_top
self.z_bot = z_bot
self.p_init = p_init
self.thickness = self.z_top - self.z_bot
self.saturation = []
self.pressure = []
self.delta_p = []
self.sat_thickness = []
def __str__(self):
a = str(self.get_x()) + ' '
b = str(self.get_y()) + ' '
c = str(self.get_z_top()) + ' '
d = str(self.get_z_bot()) + ' '
return a + b + c + d
def get_x(self):
return self.x_center
def get_y(self):
return self.y_center
def get_z_top(self):
return self.z_top
def get_thickness(self):
return self.z_top - self.z_bot
def get_z_bot(self):
return self.z_bot
def get_item_list(self, valtype):
if valtype == 'pressure':
cvals = self.pressure
elif valtype == 'saturation':
cvals = self.saturation
elif valtype == 'thickness':
cvals = self.sat_thickness
elif valtype == 'delta_p':
cvals = self.delta_p
else:
print "please specify a valid list"
return 1
return cvals
def read_output_data(layer = 'SleipnerL9'):
print "Reading VESA output from layer: " + layer
# reads the pressure file
r = open('p.csv','r')
# line 1 - x values
line = r.readline()
tempx = line.split(', ')
tempx.remove('')
tempx[-1] = tempx[-1].replace(' \n','')
# line 2 - y values
line = r.readline()
tempy = line.split(', ')
tempy.remove('')
tempy[-1] = tempy[-1].replace(' \n','')
#get boundaries, initial pressure
z_top = []
z_bot = []
p_init = []
fin = open(layer + '.txt','r')
for i in range(14):
line = fin.readline()
while line:
s = line.split(',')
if s[-1] == '\n':
s[-1] = s[-1].replace( '\n','')
if s[-1] == '':
s.remove('')
z_bot.append(s[8])
z_top.append(s[9])
p_init.append(s[12])
line = fin.readline()
# initialize all cells
cells = []
for i in range(0,len(tempx)):
c_in = Cell(float(tempx[i]), float(tempy[i]), \
float(z_top[i]), float(z_bot[i]), float(p_init[i]))
cells.append(c_in)
# line 3 - layer ID
line = r.readline()
# line 4 to end - pressures
line = r.readline()
templine = line.split(',')
baseline = templine
baseline[-1] = templine[-1].replace(' \n','')
time_steps = []
while line:
time_steps.append(float(templine[0]))
templine[-1] = templine[-1].replace(' \n','')
for i in range(1,len(templine)):
cells[i-1].pressure.append(float(templine[i]))
cells[i-1].delta_p.append(float(templine[i]) - cells[i-1].p_init)
line = r.readline()
templine = line.split(',')
r.close()
fin.close()
# reads in the saturation values
r = open('scbar.csv','r')
# skip the first 3 lines of the text file.
# information has already been read
for i in range(0,4):
line = r.readline()
while line:
templine = line.split(',')
del templine[0]
templine[-1] = templine[-1].replace('\n','')
for i in range(0,len(templine)):
cells[i].saturation.append(float(templine[i]))
cells[i].sat_thickness.append(\
float(templine[i]) * cells[i].get_thickness() /\
(1 - 0.2))
line = r.readline()
r.close()
return cells , time_steps
def mass_balance_read_print():
r = open("MassBalance.csv",'r')
line = r.readline()
line = r.readline()
years_mass = []
injected_mass = []
actual_mass = []
boundary_mass= []
while line:
line = line.split(',')
years_mass.append(float(line[0]))
actual_mass.append(float(line[1]))
injected_mass.append(float(line[2]))
boundary_mass.append(float(line[3]))
line = r.readline()
perdiff_mass = []
for i in range(len(injected_mass)):
if injected_mass[i] != 0.:
perdiff_mass.append(\
(actual_mass[i] - injected_mass[i]+ boundary_mass[i]) \
/ injected_mass[i] * 100)
else:
perdiff_mass.append(actual_mass[i] - injected_mass[i] + \
boundary_mass[i])
print 'year | mass balance error'
for j in range(0,len(perdiff_mass)):
print j+1, ' | '+str(perdiff_mass[j])
f = open("massBalanceError.txt",'w')
f.write("Year | Mass Balance Error \n" )
for j in range(0,len(perdiff_mass)):
f.write(str(j+1) + " | " + str(perdiff_mass[j]) + "\n")
f.close()
def val_bounds(cells, valtype):
# calculates upper and lower bounds for pressure to ensure
# comparable colorscales across contour plots
if valtype == 'saturation':
v_min = 0.0
v_max = 0.8
elif valtype == 'thickness':
v_min = 0.0
v_max = 15.
else:
val_bound = []
for c in cells:
cvals = c.get_item_list(valtype)
for el in cvals:
val_bound.append(el)
v_min = min(val_bound)
v_max = max(val_bound)
return v_min, v_max
def make_plot_grid(cells, time_index, nx, ny, valtype):
val = []
x_list = []
y_list = []
temp_val = []
counter = 0
for cel in cells:
if counter < nx:
x_list.append(cel.get_x())
temp_val.append(cel.get_item_list(valtype)[time_index])
counter +=1
if (counter % nx ) == 0:
y_list.append(cel.get_y())
val.append(temp_val)
temp_val = []
xs = np.array(x_list)
ys = np.array(y_list)
x, y = np.meshgrid(xs, ys)
zval = np.asarray(val)
return x, y, zval
def plot_timestep_contour(x, y, zval, time, i, valtype, v_val, fmt,\
yearwise = False):
yrstring = '{:4d}'.format(int(time/365 + 1998))
f_val = plt.figure(num=None, figsize=(7.5,10), dpi = 480, \
facecolor = 'w', edgecolor = 'k')
ax_val = f_val.add_subplot(111)
ax_val.set_xlabel('x-direction [m]')
ax_val.set_ylabel('y-direction [m]')
cs_val = ax_val.contourf(x,y,zval,v_val)
ax_val.set_aspect('equal')
if valtype =='pressure':
clab = valtype + " [Pa]"
cfm = '%.3e'
elif valtype == 'delta_p':
clab = valtype + " [Pa]"
cfm = '%.3e'
elif valtype == 'thickness':
clab = "CO2 Plume Thickness [m]"
cfm = '%.1f'
elif valtype == 'saturation':
clab = "CO2 Saturation []"
cfm = '%.2f'
cb_val = plt.colorbar(cs_val, shrink = 0.8, \
extend = 'both', ticks = v_val, format=cfm)
cb_val.set_label(clab)
if yearwise == True:
val_str = valtype + '_' + yrstring
else:
val_str = valtype + '_' + '{:02d}'.format(i+1)
f_val.suptitle(val_str)
f_val.savefig(val_str + "." + fmt ,bbox_inches='tight', \
format = fmt)
plt.clf()
plt.close()
return 0
def plot_vesa_timesteps(cells, time_steps, nx, ny, \
valtype = 'pressure', fmt = 'eps', yearwise = False):
font = { 'size' : '12'}
matplotlib.rc('font', **font)
n_levels = 21
v_min, v_max = val_bounds(cells, valtype)
v_val = np.linspace(v_min, v_max, num = n_levels)
for time_index in range(0,len(cells[0].get_item_list(valtype))):
print "Plotting " + valtype + " timestep: " + str(time_index)
x, y, zval = make_plot_grid(cells, time_index, nx, ny, valtype)
plot_timestep_contour(x, y, zval, time_steps[time_index], \
time_index, valtype, v_val, fmt, yearwise = yearwise)
def plot_wellhead_pressure(cells, time_steps, hydro_directory, hydro_layer_name, \
x_well = 1600., y_well = 2057.75,\
fmt = 'png', sleipner = True):
print "Plotting Wellhead Pressure..."
# find cell with that is the wellhead cell
if sleipner == True:
well_head_index = 2697
else:
well_head_index = 313
os.chdir(hydro_directory + '/')
hydro_cells, hydro_time_steps = read_output_data(layer = hydro_layer_name)
os.chdir('../')
font = { 'size' : '16'}
matplotlib.rc('font', **font)
# for that cell, get the pressure over time
if well_head_index != 0:
pres_list = cells[well_head_index].get_item_list('pressure')
# add hydrostatic initial conditions
pres_list.insert(0,\
hydro_cells[well_head_index].get_item_list('pressure')[0])
print pres_list
time_steps.insert(0,0.)
pres = np.asarray(pres_list)
pres = pres/pow(10.,3)
time_ar = np.asarray(time_steps)
f = plt.figure(num=None , dpi = 480, \
facecolor = 'w', edgecolor = 'k')
ax = f.add_subplot(111)
ax.set_xlabel('Time [days]')
ax.set_ylabel('Wellhead Pressure [kPa]')
p = plt.plot(time_ar, pres)
f.savefig('wellhead_pressure' + '.' + fmt)
plt.clf()
plt.close()
def make_cross_sections(cells, time_index, axis, index, nx):
plume = []
top = []
bot = []
y_list = []
tempsat = []
counter = 0
if axis == 1:
for cel in cells:
if (counter % nx ) == 0:
y_list.append(cel.get_y())
counter = 0
if counter == index:
plume.append(cel.get_item_list('thickness')[time_index])
top.append(cel.get_z_top())
bot.append(cel.get_z_bot())
counter +=1
elif axis == 0:
for cel in cells:
if counter / nx == 0:
y_list.append(cel.get_x())
if counter / nx == index:
plume.append(cel.get_item_list('thickness')[time_index])
top.append(cel.get_z_top())
bot.append(cel.get_z_bot())
counter +=1
zb = np.asarray(bot)
zt = np.asarray(top)
ys = np.array(y_list)
zsat = np.asarray(plume)
plume = zt - zsat
if len(ys) != len(zt) != len(zb) != len(zsat):
print "NONEQUAL LENGTH ARRAYS"
return 1
return ys, zb, zt, plume
def plot_cross_sections(cells, time_steps, nx, axis = 2, index = 32,\
fmt = 'png', yearwise = False):
# make a plot of a vertical slice:
# include top and bottom boundaries
# include sharp interface saturation thickness
print "making cross section with axis: " + str(axis) + ", index: " +\
str(index)
for i in range(0,len(cells[0].get_item_list('saturation'))):
print "Plotting Cross Section ..." + str(i)
font = { 'size' : '16'}
matplotlib.rc('font', **font)
ys, zb, zt, plume = make_cross_sections(cells, i, axis, index, nx)
f = plt.figure(num=None , dpi = 480, \
facecolor = 'w', edgecolor = 'k')
#title_string = \
#'Cross section of formation: Axis {0}, Index {1}: '.format(axis, index)
title_string = ''
yrstring = '{:4d}'.format(int(time_steps[i]/365 + 1998))
if yearwise == True:
title_string += ' in ' + yrstring
else:
title_string += 'Time t = {0} days'.format(time_steps[i])
f.suptitle(title_string)
ax = f.add_subplot(111)
ax.set_xlabel('Lateral Distance [m]')
ax.set_ylabel('Elevation [m]')
p0 = plt.plot(ys, plume, label = "CO2 Thickness")
p1 = plt.plot(ys, zb, label = "Bottom Boundary")
p2 = plt.plot(ys, zt, label = "Top Boundary")
plt.legend(loc=4)
sect_str = '{:02d}'.format(i+1)
f.savefig('cross_section_' + str(axis) + '_' + str(index) + '_' \
+ sect_str + '.' + fmt)
plt.close()
return 0
| mit |
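`mass_balance_read_print` above reports the error as (actual - injected + boundary) / injected * 100. A toy calculation with invented numbers, showing how to read a small negative value:

```python
injected, actual, boundary = 100.0, 98.5, 1.0
error_percent = (actual - injected + boundary) / injected * 100.0
print(error_percent)  # -0.5: the model accounts for 0.5% less mass than was injected
```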
marcocaccin/scikit-learn | examples/ensemble/plot_partial_dependence.py | 1 | 4444 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [2]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [1]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
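The docstring above describes partial dependence as marginalizing the target function over the complement features. A sketch of that definition in standard notation (the notation is mine, not from the file): for a target feature set $S$ with complement $C$,

$$\mathrm{PD}_S(x_S) = \mathbb{E}_{X_C}\bigl[f(x_S, X_C)\bigr] \approx \frac{1}{n}\sum_{i=1}^{n} f\bigl(x_S, x_C^{(i)}\bigr),$$

where $x_C^{(i)}$ are the complement-feature values of the $i$-th training sample.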
IssamLaradji/scikit-learn | sklearn/tests/test_random_projection.py | 19 | 14015 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrices should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
"""Check some statical properties of Gaussian random matrix"""
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
"""Check some statical properties of sparse random matrix"""
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
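`test_too_many_samples_to_find_a_safe_embedding` above expects a target dimension of 5920 for `eps=0.1` and `n_samples=1000`. The same bound can be queried directly; a short sketch using the numbers from that expected message:

```python
from sklearn.random_projection import johnson_lindenstrauss_min_dim

# Minimum embedding dimension that preserves pairwise distances within eps,
# per the Johnson-Lindenstrauss lemma; 5920 matches the message asserted above.
print(johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1))
```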
Sentient07/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 41 | 3668 | from __future__ import unicode_literals
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils.testing import assert_raises, assert_true, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
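The tests above exercise the hashing trick: feature names are mapped to column indices by a hash function, so no vocabulary needs to be stored. A minimal usage sketch with made-up feature dicts:

```python
from sklearn.feature_extraction import FeatureHasher

h = FeatureHasher(n_features=8, input_type='dict')
X = h.transform([{'cat': 1, 'dog': 2}, {'cat': -1}])
print(X.shape)      # (2, 8) scipy sparse matrix
print(X.toarray())  # signed feature values folded into 8 hashed columns
```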
hpcarcher/2015-04-16-imperial-students | testing/util/plot_city_path.py | 1 | 2772 | import csv
import sys
import ConfigParser
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
def main(argv):
"""
Plots a path between points onto a map defined by a config file.
Intended to be called by another python module.
Usage: plot_city_path.py <point coordinates file> <map properties file> <city path list>
If the third argument is omitted a basic map will be drawn.
Point Positions should have a point name, latitude and longitude.
Map Properties file should be of the format:
[figure]
height=12
width=5
[map]
projection=merc
lat_0=55.9
lon_0=-3.17
resolution=h
area_thresh=0.1
llcrnrlon=-5
llcrnrlat=55
urcrnrlon=-1.8
urcrnrlat=58
figure defines the size of the plot.
map defines the drawn map.
See the basemap documentation for full descriptions of these parameters.
Keyword arguments:
args -- list of arguments. This must have three elements,
the point positions file name, map properties file name and a list of point identifiers.
"""
config = ConfigParser.RawConfigParser()
config.read(argv[1])
plt.figure(figsize=(config.getint('figure', 'width'), config.getint('figure', 'height')))
map = Basemap(projection=config.get('map', 'projection'),
lat_0=config.getfloat('map', 'lat_0'), lon_0=config.getfloat('map', 'lon_0'),
resolution=config.get('map', 'resolution'), area_thresh=config.getfloat('map', 'area_thresh'),
llcrnrlon=config.getfloat('map', 'llcrnrlon'), llcrnrlat=config.getfloat('map', 'llcrnrlat'),
urcrnrlon=config.getfloat('map', 'urcrnrlon'), urcrnrlat=config.getfloat('map', 'urcrnrlat'))
map.drawcoastlines()
map.drawcountries()
map.drawrivers(color='b')
map.fillcontinents(color='green')
map.drawmapboundary()
csvfile = open(argv[0], 'rU')
csvreader = csv.reader(csvfile)
cities = {}
for row in csvreader:
cities[row[0]] = {'lat': float(row[1]), 'lon': float(row[2])}
csvfile.close()
lats = []
lons = []
path = []
if len(argv) == 3:
path = argv[2]
for value in path:
lats.append(cities[value]['lat'])
lons.append(cities[value]['lon'])
lats.append(cities[path[0]]['lat'])
lons.append(cities[path[0]]['lon'])
x, y = map(lons, lats)
for label, xpt, ypt in zip(path, x, y):
plt.text(xpt + 10000, ypt + 5000, label)
map.plot(x, y, 'D-', markersize=10, linewidth=1, color='k', markerfacecolor='b')
plt.show()
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-2.0 |
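The docstring above specifies the map-properties file as an INI file with `[figure]` and `[map]` sections. A small self-contained parsing sketch (Python 3 module name; the script itself uses the Python 2 spelling `ConfigParser`):

```python
import configparser

sample = """
[figure]
height=12
width=5

[map]
projection=merc
lat_0=55.9
"""

config = configparser.RawConfigParser()
config.read_string(sample)
print(config.getint('figure', 'width'), config.getfloat('map', 'lat_0'))  # 5 55.9
```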
semplea/characters-meta | python/tools.py | 1 | 4441 | # coding: utf8
# !/usr/bin/env python
import hunspell
import pandas as pd
from math import log
import matplotlib.pyplot as plt
import seaborn as sns
import codecs
import pickle
import re
import unicodedata
from ast import literal_eval
def getScriptPath():
return "/home/alexis/Documents/EPFL/MS3/Project/python"
def getIdxOfWord(ws, w):
"""Return index of word in sentence"""
try:
wIdx = ws.index(w)
except:
wIdx = -1
return wIdx
def stem(stemmer, word):
"""
Computes a possible stem for a given word
:param word: string
The word to be stemmed
:return: string
The last possible stem in list, or the word itself if no stem found
"""
wstem = stemmer.stem(word)
if len(wstem) > 0: # and wstem[-1] not in stopwords
return unicode(wstem[-1], 'utf8')
else:
return word
def storeCount(array, key):
"""Increments value for key in store by one, or sets to 1 if key nonexistent."""
if key in array:
array[key] += 1
else:
array[key] = 1
def storeIncrement(store, key, incr):
"""
Increment value for key in store by given increment.
:param incr: float
"""
if key in store:
store[key] += incr
else:
store[key] = incr
def idxForMaxKeyValPair(array):
maxV = array[0][1]
i = 0
maxVIdx = 0
for k, v in array:
if v > maxV:
maxV = v
maxVIdx = i
i += 1
return maxVIdx
def keyForMaxValue(_dict):
maxK = ''
maxV = 0
for k, v in _dict.iteritems():
if v > maxV:
maxV = v
maxK = k
return maxK
def sortUsingList(tosort, reflist):
"""
Sorts tosort by order of reflist.
Example: tosort: ['a', 'b', 'c'], reflist: [1, 3, 2]
Return: ['a', 'c', 'b']
:param tosort: list of items to reorder
:param reflist: list of sort keys, same length as tosort
:return: tosort reordered by ascending reflist
"""
return [x for (y, x) in sorted(zip(reflist, tosort))]
def sortNTopByVal(tosort, top, descending=False):
"""
Sort dictionary by descending values and return top elements.
Return list of tuples.
"""
return sorted([(k, v) for k, v in tosort.items()], key=lambda x: x[1], reverse=descending)[:top]
def buildSentsByChar(chars, sents):
"""
NOT NEEDED ANY MORE
Build map of chars to list of indices where characters occur in sents.
"""
char_sent_map = dict.fromkeys(chars, list())
for ix, sent in enumerate(sents):
for char, ix_lst in char_sent_map.iteritems():
if char in sent['nostop']:
ix_lst.append(ix)
return char_sent_map
def writeData(bookfile, char_list, wsent, sentences):
"""
Write data relevant to book to pickle files
"""
file_prefix = '../books-txt/predicted-data/'
name_prefix = bookfile.split('/')[-1][:-4] # TODO get without .txt
# write list to file, one element per line
with codecs.open(file_prefix + name_prefix + '-chars.p', mode='wb') as f:
pickle.dump(char_list, f)
# write characters sentences dict to file in json format
with codecs.open(file_prefix + name_prefix + '-charsents.p', mode='wb') as f:
pickle.dump(wsent, f)
# write sentences dict to file in json format
with codecs.open(file_prefix + name_prefix + '-sents.p', mode='wb') as f:
pickle.dump(sentences, f)
def getSurroundings(array, idx, window=2):
"""
Return words +-2 from idx
"""
surroundings = []
if idx > 1:
surroundings.append(array[idx - 2])
else:
surroundings.append('---')
if idx > 0:
surroundings.append(array[idx - 1])
else:
surroundings.append('---')
if idx < len(array) - 1:
surroundings.append(array[idx + 1])
else:
surroundings.append('---')
if idx < len(array) - 2:
surroundings.append(array[idx + 2])
else:
surroundings.append('---')
return surroundings
def getWindow(lst, index, window):
"""
:param lst: Some list
:param index: index at center of window
:param window: window size -> +- window on each side
Total size of 2*window+1
"""
min_idx = index-window if index-window >= 0 else 0
max_idx = index+window if index+window < len(lst) else len(lst)-1
return range(min_idx, max_idx+1)
def removeAccents(in_str):
encoding = "utf-8"
if(is_ascii(in_str)):
in_str = in_str.decode(encoding)
in_str = unicodedata.normalize('NFKD', in_str)
in_str = in_str.encode('ASCII', 'ignore')
return in_str
def is_ascii(mystr):
try:
mystr.decode('ascii')
return True
except UnicodeDecodeError:
return False
def camelSplit(name):
"""
Returns the string split if written in Camel case
"""
return re.sub('(?!^)([A-Z][a-z]+)', r' \1', name).split()
def objFromByte(r):
try:
return literal_eval(r.content.decode('utf-8'))
except ValueError:
return None
| mit |
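`sortNTopByVal` above is a thin wrapper around `sorted` over the dict items. A standalone sketch of the same logic with invented counts:

```python
counts = {'alice': 5, 'bob': 2, 'eve': 9}
top2 = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:2]
print(top2)  # [('eve', 9), ('alice', 5)]
```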
dsullivan7/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
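The example above builds a 90% prediction interval from the 5th and 95th conditional quantiles (`alpha=0.95` and `1 - alpha`). A quick self-contained illustration, on synthetic data, of why those two quantiles bracket roughly 90% of outcomes:

```python
import numpy as np

rng = np.random.RandomState(0)
y = rng.normal(size=10000)
lower, upper = np.percentile(y, 5), np.percentile(y, 95)
print(np.mean((y >= lower) & (y <= upper)))  # ~0.90 by construction
```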
Vimos/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 43 | 28175 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
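# Hedged aside, not part of the original test suite: bootstrapped trees stop
# being perfect on the training set because each resample contains only a
# fraction of the distinct rows -- roughly 1 - 1/e (about 63.2%) for large n.
def _expected_unique_fraction(n):
    # Probability that a given row appears at least once in n draws with replacement.
    return 1.0 - (1.0 - 1.0 / n) ** n  # -> ~0.632 as n grows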
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
assert_true(isinstance(estimator[0].steps[-1][1].random_state,
int))
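# Minimal estimator whose fit() does not accept sample_weight; the test below
# checks that passing sample_weight through a bagging ensemble built on it
# raises ValueError.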
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
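    # The incrementally grown ensemble should draw the same per-estimator seeds
    # as an ensemble fit with 10 estimators in a single call.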
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with a smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
    # Check that using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_)
def test_estimators_samples():
# Check that format of estimators_samples_ is correct and that results
# generated at fit time can be identically reproduced at a later time
# using data saved in object attributes.
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5,
max_features=0.5, random_state=1,
bootstrap=False)
bagging.fit(X, y)
# Get relevant attributes
estimators_samples = bagging.estimators_samples_
estimators_features = bagging.estimators_features_
estimators = bagging.estimators_
# Test for correct formatting
assert_equal(len(estimators_samples), len(estimators))
assert_equal(len(estimators_samples[0]), len(X))
assert_equal(estimators_samples[0].dtype.kind, 'b')
# Re-fit single estimator to test for consistent sampling
estimator_index = 0
estimator_samples = estimators_samples[estimator_index]
estimator_features = estimators_features[estimator_index]
estimator = estimators[estimator_index]
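    # Rebuild the exact training subset (sample mask and feature columns) this
    # estimator saw at fit time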
X_train = (X[estimator_samples])[:, estimator_features]
y_train = y[estimator_samples]
orig_coefs = estimator.coef_
estimator.fit(X_train, y_train)
new_coefs = estimator.coef_
assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
    # when a valid integer max_samples is supplied by the user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert_equal(bagging._max_samples, max_samples)
| bsd-3-clause |
jrper/fluidity | examples/hokkaido-nansei-oki_tsunami/raw_data/plotinputwave.py | 5 | 2520 | #!/usr/bin/env python
from fluidity_tools import stat_parser
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
import getopt
import sys
import csv
def usage():
print "plotinputwave.py -b starttime -e endtime --save=basename"
def get_inputelevation(t):
InputWaveReader = csv.reader(open('InputWave.csv', 'rb'), delimiter='\t')
data=[]
  for (time, height) in InputWaveReader:
    data.append((float(time), float(height)))
for i in range(1,len(data)):
if data[i][0]<t:
continue
t1=data[max(0,i-1)][0]
t2=data[i][0]
h1=data[max(0,i-1)][1]
h2=data[i][1]
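    # Linearly interpolate the elevation between the bracketing samples (t1, h1) and (t2, h2)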
return h1*(t-t2)/(t1-t2)+h2*(t-t1)/(t2-t1)
print "Warning: t is outside the available data. Using last available waterheigth..."
return data[-1][1]
def main(argv=None):
dt=0.05 # use same timestep than in csv file
try:
    opts, args = getopt.getopt(sys.argv[1:], "ht:e:b:", ['save=', 'help'])
except getopt.GetoptError:
print "Getopterror :("
usage()
sys.exit(2)
subtitle=''
subtitle_pure=''
endtime=22.5
starttime=0.0
save=False
for opt, arg in opts:
if opt == '--save':
save=True
savename=arg
elif opt=='-h' or opt=='--help':
usage()
sys.exit(2)
elif opt=='-t':
subtitle=', '+arg
subtitle_pure=arg
elif opt=='-b':
starttime=float(arg)
elif opt=='-e':
endtime=float(arg)
print "Generating plot"
print 'Using dt=', dt
starttimestep=int(max(0,starttime/dt))
endtimestep=int(endtime/dt)
print 'starttimestep=', starttimestep
print 'endtimestep=', endtimestep
# fill in measurement data
input_elevation=[]
time=[]
for i in range(starttimestep, endtimestep):
time.append(i*dt)
elev=get_inputelevation(time[-1])
input_elevation.append(elev*100.0) # in cm
plt.ion() # switch in interactive mode
fig1= figure()
subplt1 = fig1.add_subplot(111, xlabel='Time [s]', ylabel='Water level [cm]')
subplt1.plot(time, input_elevation) # plot gauge1 detector data
if not save:
plt.draw()
raw_input("Press Enter to exit")
else:
plt.savefig(savename+'.pdf', facecolor='white', edgecolor='black', dpi=100)
print 'Saved to '+savename+'.pdf'
# for i in range(timesteps):
# gauge1.append(s["water"]["FreeSurface"]["gauge1"])
if __name__ == "__main__":
main()
| lgpl-2.1 |
jzt5132/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
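# (every fifth target is perturbed by uniform noise drawn from (-1.5, 1.5])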
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
stuart-knock/tvb-library | tvb/simulator/plot/power_spectra_interactive.py | 3 | 18840 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
An interactive power spectra plot generated from a TVB TimeSeries datatype.
Usage
::
#Load the demo data
import numpy
data = numpy.load("demos/demo_data_region_16s_2048Hz.npy")
period = 0.00048828125 #NOTE: Providing period in seconds
#Create a tvb TimeSeries object
import tvb.datatypes.time_series
tsr = tvb.datatypes.time_series.TimeSeriesRegion()
tsr.data = data
tsr.sample_period = period
#Create and launch the interactive visualiser
import tvb.simulator.power_spectra_interactive as ps_int
psi = ps_int.PowerSpectraInteractive(time_series=tsr)
psi.show()
.. moduleauthor:: Stuart A. Knock <[email protected]>
"""
#TODO: There are fence-posts...
#TODO: add a save button, for current powerspectra view (data more than fig)
#TODO: channel/region selection, with surface time-series vertex selection
# grouped by region
import numpy
import pylab
import matplotlib.widgets as widgets
#The Virtual Brain
from tvb.simulator.common import get_logger
LOG = get_logger(__name__)
import tvb.datatypes.time_series as time_series_datatypes
import tvb.basic.traits.core as core
import tvb.basic.traits.types_basic as basic
# Define a colour theme... see: matplotlib.colors.cnames.keys()
BACKGROUNDCOLOUR = "slategrey"
EDGECOLOUR = "darkslateblue"
AXCOLOUR = "steelblue"
BUTTONCOLOUR = "steelblue"
HOVERCOLOUR = "blue"
class PowerSpectraInteractive(core.Type):
"""
The graphical interface for visualising the power-spectra (FFT) of a
    timeseries provides controls for setting:
- which state-variable and mode to display [sets]
- log or linear scaling for the power or frequency axis [binary]
        - segmentation length [set]
- windowing function [set]
- power normalisation [binary] (emphasise relative frequency contribution)
- show std or sem [binary]
"""
time_series = time_series_datatypes.TimeSeries(
label = "Timeseries",
default = None,
required = True,
doc = """ The timeseries to which the FFT is to be applied.""")
first_n = basic.Integer(
label = "Display the first 'n'",
default = -1,
required = True,
doc = """Primarily intended for displaying the first N components of a
surface PCA timeseries. Defaults to -1, meaning it'll display all
of 'space' (ie, regions or vertices or channels). In other words,
for Region or M/EEG timeseries you can ignore this, but, for a
surface timeseries it really must be set.""")
def __init__(self, **kwargs):
"""
Initialise based on provided keywords or their traited defaults. Also,
initialise the place-holder attributes that aren't filled until the
show() method is called.
"""
#figure
self.ifft_fig = None
#time-series
self.fft_ax = None
#Current state
self.xscale = "linear"
self.yscale = "log"
self.mode = 0
self.variable = 0
self.show_sem = False
self.show_std = False
self.normalise_power = "no"
self.window_length = 0.25
self.window_function = "None"
#Selectors
self.xscale_selector = None
self.yscale_selector = None
self.mode_selector = None
self.variable_selector = None
self.show_sem_selector = None
self.show_std_selector = None
self.normalise_power_selector = None
self.window_length_selector = None
self.window_function_selector = None
#
possible_freq_steps = [2**x for x in range(-2, 7)] #Hz
#possible_freq_steps.append(1.0 / self.time_series_length) #Hz
self.possible_window_lengths = 1.0 / numpy.array(possible_freq_steps) #s
self.freq_step = 1.0 / self.window_length
self.frequency = None
self.spectra = None
self.spectra_norm = None
#Sliders
#self.window_length_slider = None
def configure(self):
""" Seperate configure cause ttraits be busted... """
LOG.debug("time_series shape: %s" % str(self.time_series.data.shape))
#TODO: if isinstance(self.time_series, TimeSeriesSurface) and self.first_n == -1: #LOG.error, return.
self.data = self.time_series.data[:, :, :self.first_n, :]
self.period = self.time_series.sample_period
self.max_freq = 0.5 / self.period
self.units = "Hz"
self.tpts = self.data.shape[0]
self.nsrs = self.data.shape[2]
self.time_series_length = self.tpts * self.period
self.time = numpy.arange(self.tpts) * self.period
self.labels = ["channel_%0.3d" % k for k in range(self.nsrs)]
def show(self):
""" Generate the interactive power-spectra figure. """
#Make sure everything is configured
self.configure()
#Make the figure:
self.create_figure()
#Selectors
self.add_xscale_selector()
self.add_yscale_selector()
self.add_mode_selector()
self.add_variable_selector()
self.add_normalise_power_selector()
self.add_window_length_selector()
self.add_window_function_selector()
#Sliders
#self.add_window_length_slider() #Want discrete values
#self.add_scaling_slider()
#...
self.calc_fft()
#Plot timeseries
self.plot_spectra()
pylab.show()
##------------------------------------------------------------------------##
##------------------ Functions for building the figure -------------------##
##------------------------------------------------------------------------##
def create_figure(self):
""" Create the figure and time-series axes. """
time_series_type = self.time_series.__class__.__name__
try:
figure_window_title = "Interactive power spectra: " + time_series_type
pylab.close(figure_window_title)
self.ifft_fig = pylab.figure(num = figure_window_title,
figsize = (16, 8),
facecolor = BACKGROUNDCOLOUR,
edgecolor = EDGECOLOUR)
except ValueError:
LOG.info("My life would be easier if you'd update your PyLab...")
figure_number = 42
pylab.close(figure_number)
self.ifft_fig = pylab.figure(num = figure_number,
figsize = (16, 8),
facecolor = BACKGROUNDCOLOUR,
edgecolor = EDGECOLOUR)
self.fft_ax = self.ifft_fig.add_axes([0.15, 0.2, 0.7, 0.75])
def add_xscale_selector(self):
"""
Add a radio button to the figure for selecting which scaling the x-axes
should use.
"""
pos_shp = [0.45, 0.02, 0.05, 0.104]
rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="xscale")
xscale_tuple = ("log", "linear")
self.xscale_selector = widgets.RadioButtons(rax, xscale_tuple, active=1)
self.xscale_selector.on_clicked(self.update_xscale)
def add_yscale_selector(self):
"""
Add a radio button to the figure for selecting which scaling the y-axes
should use.
"""
pos_shp = [0.02, 0.5, 0.05, 0.104]
rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="yscale")
yscale_tuple = ("log", "linear")
self.yscale_selector = widgets.RadioButtons(rax, yscale_tuple, active=0)
self.yscale_selector.on_clicked(self.update_yscale)
def add_mode_selector(self):
"""
Add a radio button to the figure for selecting which mode of the model
should be displayed.
"""
pos_shp = [0.02, 0.07, 0.05, 0.1+0.002*self.data.shape[3]]
rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="Mode")
mode_tuple = tuple(range(self.data.shape[3]))
self.mode_selector = widgets.RadioButtons(rax, mode_tuple, active=0)
self.mode_selector.on_clicked(self.update_mode)
def add_variable_selector(self):
"""
Generate radio selector buttons to set which state variable is
displayed.
"""
noc = self.data.shape[1] # number of choices
#State variable for the x axis
pos_shp = [0.02, 0.22, 0.05, 0.12+0.008*noc]
rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR,
title="state variable")
self.variable_selector = widgets.RadioButtons(rax, tuple(range(noc)),
active=0)
self.variable_selector.on_clicked(self.update_variable)
def add_window_length_selector(self):
"""
        Generate radio selector buttons to set the window length in seconds.
"""
noc = self.possible_window_lengths.shape[0] # number of choices
        #Window length selector
pos_shp = [0.88, 0.07, 0.1, 0.12+0.02*noc]
rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR,
title="Segment length")
wl_tup = tuple(self.possible_window_lengths)
self.window_length_selector = widgets.RadioButtons(rax, wl_tup, active=4)
self.window_length_selector.on_clicked(self.update_window_length)
def add_window_function_selector(self):
"""
Generate radio selector buttons to set the windowing function.
"""
        #TODO: add support for kaiser, requires specification of beta.
wf_tup = ("None", "hamming", "bartlett", "blackman", "hanning")
noc = len(wf_tup) # number of choices
        #Windowing function selector
pos_shp = [0.88, 0.77, 0.085, 0.12+0.01*noc]
rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR,
title="Windowing function")
self.window_function_selector = widgets.RadioButtons(rax, wf_tup, active=0)
self.window_function_selector.on_clicked(self.update_window_function)
def add_normalise_power_selector(self):
"""
        Add a radio button to choose whether or not the power of all spectra
        should be normalised to 1.
"""
pos_shp = [0.02, 0.8, 0.05, 0.104]
rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="normalise")
np_tuple = ("yes", "no")
self.normalise_power_selector = widgets.RadioButtons(rax, np_tuple, active=1)
self.normalise_power_selector.on_clicked(self.update_normalise_power)
##------------------------------------------------------------------------##
##------------------ Functions for updating the state --------------------##
##------------------------------------------------------------------------##
def calc_fft(self):
"""
Calculate FFT using current state of the window_length, window_function,
"""
#Segment time-series, overlapping if necessary
nseg = int(numpy.ceil(self.time_series_length / self.window_length))
if nseg != 1:
seg_tpts = self.window_length / self.period
overlap = ((seg_tpts * nseg) - self.tpts) / (nseg-1)
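            # Overlap the segments just enough that nseg windows of seg_tpts points span the whole series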
starts = [max(seg*(seg_tpts - overlap), 0) for seg in range(nseg)]
segments = [self.data[start:start+seg_tpts] for start in starts]
segments = [segment[:, :, :, numpy.newaxis] for segment in segments]
time_series = numpy.concatenate(segments, axis=4)
else:
time_series = self.data[:, :, :, :, numpy.newaxis]
seg_tpts = time_series.shape[0]
#Base-line correct segmented time-series
time_series = time_series - time_series.mean(axis=0)[numpy.newaxis, :]
#Apply windowing function
if self.window_function != "None":
window_function = eval("".join(("numpy.", self.window_function)))
window_mask = numpy.reshape(window_function(seg_tpts),
(seg_tpts, 1, 1, 1, 1))
time_series = time_series * window_mask
#Calculate the FFT
result = numpy.fft.fft(time_series, axis=0)
nfreq = len(result)/2
self.frequency = numpy.arange(0, self.max_freq, self.freq_step)
LOG.debug("frequency shape: %s" % str(self.frequency.shape))
self.spectra = numpy.mean(numpy.abs(result[1:nfreq+1])**2, axis=-1)
LOG.debug("spectra shape: %s" % str(self.spectra.shape))
self.spectra_norm = (self.spectra / numpy.sum(self.spectra, axis=0))
LOG.debug("spectra_norm shape: %s" % str(self.spectra_norm.shape))
#import pdb; pdb.set_trace()
# self.spectra_std = numpy.std(numpy.abs(result[:nfreq]), axis=4)
# self.spectra_sem = self.spectra_std / time_series.shape[4]
##------------------------------------------------------------------------##
##------------------ Functions for updating the figure -------------------##
##------------------------------------------------------------------------##
def update_xscale(self, xscale):
"""
Update the FFT axes' xscale to either log or linear based on radio
button selection.
"""
self.xscale = xscale
self.fft_ax.set_xscale(self.xscale)
pylab.draw()
def update_yscale(self, yscale):
"""
Update the FFT axes' yscale to either log or linear based on radio
button selection.
"""
self.yscale = yscale
self.fft_ax.set_yscale(self.yscale)
pylab.draw()
def update_mode(self, mode):
""" Update the visualised mode based on radio button selection. """
self.mode = mode
self.plot_spectra()
def update_variable(self, variable):
"""
        Update state variable being plotted based on radio button selection.
"""
self.variable = variable
self.plot_spectra()
def update_normalise_power(self, normalise_power):
""" Update whether to normalise based on radio button selection. """
self.normalise_power = normalise_power
self.plot_spectra()
def update_window_length(self, length):
"""
Update timeseries window length based on the selected value.
"""
#TODO: need this casting but not sure why, don't need int() with mode...
self.window_length = numpy.float64(length)
#import pdb; pdb.set_trace()
self.freq_step = 1.0 / self.window_length
self.update_spectra()
def update_window_function(self, window_function):
"""
Update windowing function based on the radio button selection.
"""
self.window_function = window_function
self.update_spectra()
def update_spectra(self):
""" Clear the axes and redraw the power-spectra. """
self.calc_fft()
self.plot_spectra()
# def plot_std(self):
# """ Plot """
# std = (self.spectra[:, self.variable, :, self.mode] +
# self.spectra_std[:, self.variable, :, self.mode])
# self.fft_ax.plot(self.frequency, std, "--")
#
#
# def plot_sem(self):
# """ """
# sem = (self.spectra[:, self.variable, :, self.mode] +
# self.spectra_sem[:, self.variable, :, self.mode])
# self.fft_ax.plot(self.frequency, sem, ":")
def plot_spectra(self):
""" Plot the power spectra. """
self.fft_ax.clear()
# Set title and axis labels
time_series_type = self.time_series.__class__.__name__
self.fft_ax.set(title = time_series_type)
self.fft_ax.set(xlabel = "Frequency (%s)" % self.units)
self.fft_ax.set(ylabel = "Power")
        # Set x and y scale based on current radio button selection.
self.fft_ax.set_xscale(self.xscale)
self.fft_ax.set_yscale(self.yscale)
if hasattr(self.fft_ax, 'autoscale'):
self.fft_ax.autoscale(enable=True, axis='both', tight=True)
#import pdb; pdb.set_trace()
#Plot the power spectra
if self.normalise_power == "yes":
self.fft_ax.plot(self.frequency,
self.spectra_norm[:, self.variable, :, self.mode])
else:
self.fft_ax.plot(self.frequency,
self.spectra[:, self.variable, :, self.mode])
# #TODO: Need to ensure colour matching... and allow region selection.
# #If requested, add standard deviation
# if self.show_std:
# self.plot_std(self)
#
# #If requested, add standard error in mean
# if self.show_sem:
# self.plot_sem(self)
pylab.draw()
if __name__ == "__main__":
# Do some stuff that tests or makes use of this module...
LOG.info("Testing %s module..." % __file__)
try:
data = numpy.load("../demos/demo_data_region_16s_2048Hz.npy")
except IOError:
LOG.error("Can't load demo data. Run demos/generate_region_demo_data.py")
raise
period = 0.00048828125 #NOTE: Providing period in seconds
tsr = time_series_datatypes.TimeSeriesRegion()
tsr.data = data
tsr.sample_period = period
psi = PowerSpectraInteractive(time_series=tsr)
psi.show()
| gpl-2.0 |
meduz/scikit-learn | sklearn/utils/__init__.py | 5 | 13264 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import DataConversionWarning
from .deprecation import deprecated
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric", "indices_to_mask", "deprecated"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def axis0_safe_slice(X, mask, len_mask):
"""
This mask is safer than safe_mask since it returns an
empty array, when a sparse matrix is sliced with a boolean mask
with all False, instead of raising an unhelpful error in older
versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
If replace is False it should not be larger than the length of
arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
elif (max_n_samples > n_samples) and (not replace):
raise ValueError("Cannot sample %d out of arrays with dim %d"
"when replace is False" % (max_n_samples,
n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
def indices_to_mask(indices, mask_length):
"""Convert list of indices to boolean mask.
Parameters
----------
indices : list-like
List of integers treated as indices.
mask_length : int
Length of boolean mask to be generated.
Returns
-------
mask : 1d boolean nd-array
Boolean array that is True where indices are present, else False.
"""
if mask_length <= np.max(indices):
raise ValueError("mask_length must be greater than max(indices)")
mask = np.zeros(mask_length, dtype=np.bool)
mask[indices] = True
return mask
| bsd-3-clause |
bokeh/bokeh | examples/app/export_csv/main.py | 1 | 1440 | from os.path import dirname, join
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (Button, ColumnDataSource, CustomJS, DataTable,
NumberFormatter, RangeSlider, TableColumn)
df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
source = ColumnDataSource(data=dict())
def update():
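    # Keep the rows whose salary falls inside the current slider range and push them to the table's source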
current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()
source.data = {
'name' : current.name,
'salary' : current.salary,
'years_experience' : current.years_experience,
}
slider = RangeSlider(title="Max Salary", start=10000, end=110000, value=(10000, 50000), step=1000, format="0,0")
slider.on_change('value', lambda attr, old, new: update())
button = Button(label="Download", button_type="success")
button.js_on_click(CustomJS(args=dict(source=source),
code=open(join(dirname(__file__), "download.js")).read()))
columns = [
TableColumn(field="name", title="Employee Name"),
TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
TableColumn(field="years_experience", title="Experience (years)")
]
data_table = DataTable(source=source, columns=columns, width=800)
controls = column(slider, button)
curdoc().add_root(row(controls, data_table))
curdoc().title = "Export CSV"
update()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/preprocessing/label.py | 35 | 28877 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            and represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, and represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
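    # Each sample whose label is known gets exactly one positive entry in its
    # row; unknown labels give all-zero rows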
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
haojunyu/numpy | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
 >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple, lists, and types
 >>> x
 array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
 >>> np.zeros((2, 3))
 array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
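For example, one might evaluate a simple function of two variables on such a
grid (a small illustrative sketch; the function here is arbitrary): ::
 >>> i, j = np.indices((3, 3))
 >>> z = i**2 + j**2   # evaluate z = f(i, j) at every grid point at once
 >>> z
 array([[0, 1, 4], [1, 2, 5], [4, 5, 8]])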
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
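For example, a plain comma-separated file of numbers can also be read with
numpy itself (a minimal sketch; the file name 'data.csv' is only an assumed
example): ::
 >>> a = np.loadtxt('data.csv', delimiter=',')
 >>> b = np.genfromtxt('data.csv', delimiter=',')  # more tolerant of missing values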
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
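A minimal sketch of the simple case (the file name and dtype here are only
assumptions for illustration): ::
 >>> a = np.arange(10, dtype=np.float32)
 >>> a.tofile('data.bin')   # raw bytes, no header or shape information
 >>> b = np.fromfile('data.bin', dtype=np.float32)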
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common use is the
many array generation functions in random that can generate arrays of
random values, together with some utility functions that generate special
matrices (e.g. diagonal).
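For example (a small sketch of random and diagonal array generation): ::
 >>> a = np.random.rand(2, 3)   # uniform random values in [0, 1)
 >>> b = np.random.randn(4)     # standard normal samples
 >>> d = np.diag([1, 2, 3])     # 3x3 diagonal matrix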
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
potash/scikit-learn | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
    # Test that results with class_weight="balanced" are invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
bthirion/nipy | nipy/algorithms/clustering/ggmixture.py | 2 | 20751 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
One-dimensional Gamma-Gaussian mixture density classes : Given a set
of points the algorithm provides approximate maximum likelihood estimates
of the mixture distribution using an EM algorithm.
Author: Bertrand Thirion and Merlin Keller 2005-2008
"""
from __future__ import print_function
import numpy as np
import scipy.stats as st
import scipy.special as sp
#############################################################################
# Auxiliary functions #######################################################
#############################################################################
def _dichopsi_log(u, v, y, eps=0.00001):
""" Implements the dichotomic part of the solution of psi(c)-log(c)=y
"""
if u > v:
u, v = v, u
t = (u + v) / 2
if np.absolute(u - v) < eps:
return t
else:
if sp.psi(t) - np.log(t) > y:
return _dichopsi_log(u, t, y, eps)
else:
return _dichopsi_log(t, v, y, eps)
def _psi_solve(y, eps=0.00001):
""" Solve psi(c)-log(c)=y by dichotomy
"""
if y > 0:
        print("y", y)
raise ValueError("y>0, the problem cannot be solved")
u = 1.
if y > sp.psi(u) - np.log(u):
while sp.psi(u) - np.log(u) < y:
u *= 2
u /= 2
else:
while sp.psi(u) - np.log(u) > y:
u /= 2
return _dichopsi_log(u, 2 * u, y, eps)
def _compute_c(x, z, eps=0.00001):
"""
    this function returns the mle of the shape parameter of a 1D gamma
    density
"""
eps = 1.e-7
y = np.dot(z, np.log(x)) / np.sum(z) - np.log(np.dot(z, x) / np.sum(z))
if y > - eps:
c = 10
else:
c = _psi_solve(y, eps=0.00001)
return c
def _gaus_dens(mean, var, x):
""" evaluate the gaussian density (mean,var) at points x
"""
Q = - (x - mean) ** 2 / (2 * var)
return 1. / np.sqrt(2 * np.pi * var) * np.exp(Q)
def _gam_dens(shape, scale, x):
"""evaluate the gamma density (shape,scale) at points x
Notes
-----
Returns 0 on negative subspace
"""
ng = np.zeros(np.size(x))
cst = - shape * np.log(scale) - sp.gammaln(shape)
i = np.ravel(np.nonzero(x > 0))
if np.size(i) > 0:
lz = cst + (shape - 1) * np.log(x[i]) - x[i] / scale
ng[i] = np.exp(lz)
return ng
def _gam_param(x, z):
    """ Compute the parameters of a gamma density from weighted data points
Parameters
----------
x: array of shape(nbitem) the learning points
z: array of shape(nbitem), their membership within the class
Notes
-----
if no point is positive then the couple (1, 1) is returned
"""
eps = 1.e-5
i = np.ravel(np.nonzero(x > 0))
szi = np.sum(z[i])
if szi > 0:
shape = _compute_c(x[i], z[i], eps)
scale = np.dot(x[i], z[i]) / (szi * shape)
else:
shape = 1
scale = 1
return shape, scale
##############################################################################
# class `Gamma`
##############################################################################
class Gamma(object):
    """ Basic one dimensional Gamma density estimation class
    NB : The gamma distribution is defined only on positive values,
    so the input data must contain only positive values.
    2 parameters are used:
    - shape: gamma shape parameter
    - scale: gamma scale parameter
    """
def __init__(self, shape=1, scale=1):
self.shape = shape
self.scale = scale
def parameters(self):
        print("shape: ", self.shape, "scale: ", self.scale)
def check(self, x):
if (x.min() < 0):
raise ValueError("negative values in input")
def estimate(self, x, eps=1.e-7):
"""
ML estimation of the Gamma parameters
"""
self.check(x)
n = np.size(x)
y = np.sum(np.log(x)) / n - np.log(np.sum(x) / n)
if y > - eps:
self.shape = 1
else:
self.shape = _psi_solve(y)
self.scale = np.sum(x) / (n * self.shape)
##############################################################################
# Gamma-Gaussian Mixture class
##############################################################################
class GGM(object):
"""
This is the basic one dimensional Gaussian-Gamma Mixture estimation class
Note that it can work with positive or negative values,
as long as there is at least one positive value.
NB : The gamma distribution is defined only on positive values.
5 scalar members
- mean: gaussian mean
- var: gaussian variance (non-negative)
- shape: gamma shape (non-negative)
- scale: gamma scale (non-negative)
- mixt: mixture parameter (non-negative, weight of the gamma)
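    A minimal usage sketch (x is assumed to be a one-dimensional data array)::
        ggm = GGM()
        ggm.estimate(x)
        gauss_post, gamma_post = ggm.posterior(x)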
"""
def __init__(self, shape=1, scale=1, mean=0, var=1, mixt=0.5):
self.shape = shape
self.scale = scale
self.mean = mean
self.var = var
self.mixt = mixt
    def parameters(self):
        """ Print the parameters of self
"""
        print("Gaussian: mean: ", self.mean, "variance: ", self.var)
        print("Gamma: shape: ", self.shape, "scale: ", self.scale)
        print("Mixture gamma: ", self.mixt, "Gaussian: ", 1 - self.mixt)
def Mstep(self, x, z):
"""
Mstep of the model: maximum likelihood
estimation of the parameters of the model
Parameters
----------
x : array of shape (nbitems,)
input data
        z : array of shape (nbitems, 2)
the membership matrix
"""
        # z[:, 0] is the likelihood to be generated by the gamma
        # z[:, 1] is the likelihood to be generated by the gaussian
tiny = 1.e-15
sz = np.maximum(tiny, np.sum(z, 0))
self.shape, self.scale = _gam_param(x, z[:, 0])
self.mean = np.dot(x, z[:, 1]) / sz[1]
self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1]
self.mixt = sz[0] / np.size(x)
def Estep(self, x):
"""
E step of the estimation:
        Estimation of data membership
Parameters
----------
x: array of shape (nbitems,)
input data
Returns
-------
z: array of shape (nbitems, 2)
the membership matrix
"""
eps = 1.e-15
z = np.zeros((np.size(x), 2), 'd')
z[:, 0] = _gam_dens(self.shape, self.scale, x)
z[:, 1] = _gaus_dens(self.mean, self.var, x)
z = z * np.array([self.mixt, 1. - self.mixt])
sz = np.maximum(np.sum(z, 1), eps)
L = np.sum(np.log(sz)) / np.size(x)
z = (z.T / sz).T
return z, L
def estimate(self, x, niter=10, delta=0.0001, verbose=False):
""" Complete EM estimation procedure
Parameters
----------
x : array of shape (nbitems,)
the data to be processed
niter : int, optional
max nb of iterations
delta : float, optional
criterion for convergence
verbose : bool, optional
If True, print values during iterations
Returns
-------
LL, float
average final log-likelihood
"""
if x.max() < 0:
# all the values are generated by the Gaussian
self.mean = np.mean(x)
self.var = np.var(x)
self.mixt = 0.
L = 0.5 * (1 + np.log(2 * np.pi * self.var))
return L
# proceed with standard estimate
z, L = self.Estep(x)
L0 = L - 2 * delta
for i in range(niter):
self.Mstep(x, z)
z, L = self.Estep(x)
if verbose:
                print(i, L)
if (L < L0 + delta):
break
L0 = L
return L
def show(self, x):
""" Visualization of the mm based on the empirical histogram of x
Parameters
----------
x : array of shape (nbitems,)
the data to be processed
"""
step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
bins = max(10, int((x.max() - x.min()) / step))
h, c = np.histogram(x, bins)
h = h.astype(np.float) / np.size(x)
p = self.mixt
dc = c[1] - c[0]
y = (1 - p) * _gaus_dens(self.mean, self.var, c) * dc
z = np.zeros(np.size(c))
z = _gam_dens(self.shape, self.scale, c) * p * dc
import matplotlib.pylab as mp
mp.figure()
mp.plot(0.5 * (c[1:] + c[:-1]), h)
mp.plot(c, y, 'r')
mp.plot(c, z, 'g')
mp.plot(c, z + y, 'k')
mp.title('Fit of the density with a Gamma-Gaussians mixture')
        mp.legend(('data', 'gaussian component', 'gamma component',
'mixture distribution'))
def posterior(self, x):
"""Posterior probability of observing the data x for each component
Parameters
----------
x: array of shape (nbitems,)
the data to be processed
Returns
-------
y, pg : arrays of shape (nbitem)
the posterior probability
"""
p = self.mixt
pg = p * _gam_dens(self.shape, self.scale, x)
y = (1 - p) * _gaus_dens(self.mean, self.var, x)
return y / (y + pg), pg / (y + pg)
##############################################################################
# double-Gamma-Gaussian Mixture class
##############################################################################
class GGGM(object):
"""
The basic one dimensional Gamma-Gaussian-Gamma Mixture estimation
class, where the first gamma has a negative sign, while the second
one has a positive sign.
7 parameters are used:
- shape_n: negative gamma shape
- scale_n: negative gamma scale
- mean: gaussian mean
- var: gaussian variance
- shape_p: positive gamma shape
- scale_p: positive gamma scale
- mixt: array of mixture parameter
(weights of the n-gamma,gaussian and p-gamma)
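    A minimal usage sketch (x is assumed to be a 1D array of signed test
    statistics)::
        model = GGGM()
        model.init_fdr(x)
        membership = model.estimate(x)
        neg_post, gauss_post, pos_post = model.posterior(x)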
"""
def __init__(self, shape_n=1, scale_n=1, mean=0, var=1,
shape_p=1, scale_p=1, mixt=np.array([1.0, 1.0, 1.0]) / 3):
""" Constructor
Parameters
-----------
shape_n : float, optional
scale_n: float, optional
            parameters of the negative gamma; must be positive
mean : float, optional
var : float, optional
parameters of the gaussian ; var must be positive
shape_p : float, optional
scale_p : float, optional
parameters of the positive gamma; must be positive
mixt : array of shape (3,), optional
the mixing proportions; they should be positive and sum to 1
"""
self.shape_n = shape_n
self.scale_n = scale_n
self.mean = mean
self.var = var
self.shape_p = shape_p
self.scale_p = scale_p
self.mixt = mixt
def parameters(self):
""" Print the parameters
"""
        print("Negative Gamma: shape: ", self.shape_n,
              "scale: ", self.scale_n)
        print("Gaussian: mean: ", self.mean, "variance: ", self.var)
        print("Positive Gamma: shape: ", self.shape_p, "scale: ",
              self.scale_p)
        mixt = self.mixt
        print("Mixture neg. gamma: ", mixt[0], "Gaussian: ", mixt[1],
              "pos. gamma: ", mixt[2])
def init(self, x, mixt=None):
"""
        Initialization of the different parameters
Parameters
----------
x: array of shape(nbitems)
the data to be processed
mixt : None or array of shape(3), optional
prior mixing proportions. If None, the classes have equal weight
"""
        if mixt is not None:
if np.size(mixt) == 3:
self.mixt = np.ravel(mixt)
else:
raise ValueError('bad size for mixt')
# gaussian
self.mean = np.mean(x)
self.var = np.var(x)
# negative gamma
i = np.ravel(np.nonzero(x < 0))
if np.size(i) > 0:
mn = - np.mean(x[i])
vn = np.var(x[i])
self.scale_n = vn / mn
self.shape_n = mn ** 2 / vn
else:
self.mixt[0] = 0
# positive gamma
i = np.ravel(np.nonzero(x > 0))
if np.size(i) > 0:
mp = np.mean(x[i])
vp = np.var(x[i])
self.scale_p = vp / mp
self.shape_p = mp ** 2 / vp
else:
self.mixt[2] = 0
# mixing proportions
self.mixt = self.mixt / np.sum(self.mixt)
def init_fdr(self, x, dof=-1, copy=True):
"""
        Initialization of the class based on a fdr heuristic: the
probability to be in the positive component is proportional to
the 'positive fdr' of the data. The same holds for the
negative part. The point is that the gamma parts should model
        nothing more than the tails of the distribution.
Parameters
----------
x: array of shape(nbitem)
the data under consideration
dof: integer, optional
number of degrees of freedom if x is thought to be a student
            variate. By default, it is handled as a normal
copy: boolean, optional
If True, copy the data.
"""
# Safeguard ourselves against modifications of x, both by our
# code, and by external code.
if copy:
x = x.copy()
# positive gamma
i = np.ravel(np.nonzero(x > 0))
from ..statistics.empirical_pvalue import fdr
if np.size(i) > 0:
if dof < 0:
pvals = st.norm.sf(x)
else:
pvals = st.t.sf(x, dof)
q = fdr(pvals)
z = 1 - q[i]
self.mixt[2] = np.maximum(0.5, z.sum()) / np.size(x)
self.shape_p, self.scale_p = _gam_param(x[i], z)
else:
self.mixt[2] = 0
# negative gamma
i = np.ravel(np.nonzero(x < 0))
if np.size(i) > 0:
if dof < 0:
pvals = st.norm.cdf(x)
else:
pvals = st.t.cdf(x, dof)
q = fdr(pvals)
z = 1 - q[i]
self.shape_n, self.scale_n = _gam_param( - x[i], z)
self.mixt[0] = np.maximum(0.5, z.sum()) / np.size(x)
else:
self.mixt[0] = 0
self.mixt[1] = 1 - self.mixt[0] - self.mixt[2]
def Mstep(self, x, z):
"""
Mstep of the estimation:
        Maximum likelihood update of the parameters of the three components
Parameters
------------
x: array of shape (nbitem,)
input data
z: array of shape (nbitems,3)
probabilistic membership
"""
tiny = 1.e-15
sz = np.maximum(np.sum(z, 0), tiny)
self.mixt = sz / np.sum(sz)
# negative gamma
self.shape_n, self.scale_n = _gam_param( - x, z[:, 0])
# gaussian
self.mean = np.dot(x, z[:, 1]) / sz[1]
self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1]
# positive gamma
self.shape_p, self.scale_p = _gam_param(x, z[:, 2])
def Estep(self, x):
""" Update probabilistic memberships of the three components
Parameters
----------
x: array of shape (nbitems,)
the input data
Returns
-------
z: ndarray of shape (nbitems, 3)
probabilistic membership
Notes
-----
        z[:, 0] is the membership of the negative gamma
        z[:, 1] is the membership of the gaussian
        z[:, 2] is the membership of the positive gamma
"""
tiny = 1.e-15
z = np.array(self.component_likelihood(x)).T * self.mixt
sz = np.maximum(tiny, np.sum(z, 1))
L = np.mean(np.log(sz))
z = (z.T / sz).T
return z, L
def estimate(self, x, niter=100, delta=1.e-4, bias=0, verbose=0,
gaussian_mix=0):
""" Whole EM estimation procedure:
Parameters
----------
x: array of shape (nbitem)
input data
niter: integer, optional
max number of iterations
delta: float, optional
increment in LL at which convergence is declared
bias: float, optional
lower bound on the gaussian variance (to avoid shrinkage)
gaussian_mix: float, optional
if nonzero, lower bound on the gaussian mixing weight
(to avoid shrinkage)
verbose: 0, 1 or 2
verbosity level
Returns
-------
z: array of shape (nbitem, 3)
the membership matrix
"""
z, L = self.Estep(x)
L0 = L - 2 * delta
for i in range(niter):
self.Mstep(x, z)
            # Constrain the Gaussian variance
if bias > 0:
self.var = np.maximum(bias, self.var)
            # Constrain the Gaussian mixing ratio
if gaussian_mix > 0 and self.mixt[1] < gaussian_mix:
upper, gaussian, lower = self.mixt
upper_to_lower = upper / (lower + upper)
gaussian = gaussian_mix
upper = (1 - gaussian_mix) * upper_to_lower
lower = 1 - gaussian_mix - upper
self.mixt = lower, gaussian, upper
z, L = self.Estep(x)
if verbose:
                print(i, L)
if (L < L0 + delta):
break
L0 = L
return z
def posterior(self, x):
"""
Compute the posterior probability of the three components
given the data
Parameters
-----------
x: array of shape (nbitem,)
the data under evaluation
Returns
--------
ng,y,pg: three arrays of shape(nbitem)
            the posterior probabilities of the 3 components given the data
Notes
-----
ng + y + pg = np.ones(nbitem)
"""
p = self.mixt
ng, y, pg = self.component_likelihood(x)
total = ng * p[0] + y * p[1] + pg * p[2]
return ng * p[0] / total, y * p[1] / total, pg * p[2] / total
def component_likelihood(self, x):
"""
Compute the likelihood of the data x under
        the three components: negative gamma, gaussian, positive gamma
Parameters
-----------
x: array of shape (nbitem,)
the data under evaluation
Returns
--------
ng,y,pg: three arrays of shape(nbitem)
The likelihood of the data under the 3 components
"""
ng = _gam_dens(self.shape_n, self.scale_n, - x)
y = _gaus_dens(self.mean, self.var, x)
pg = _gam_dens(self.shape_p, self.scale_p, x)
return ng, y, pg
def show(self, x, mpaxes=None):
""" Visualization of mixture shown on the empirical histogram of x
Parameters
----------
x: ndarray of shape (nditem,)
data
mpaxes: matplotlib axes, optional
            axes handle used for the plot; if None, new axes are created.
"""
import matplotlib.pylab as mp
step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
bins = max(10, int((x.max() - x.min()) / step))
h, c = np.histogram(x, bins)
h = h.astype(np.float) / np.size(x)
dc = c[1] - c[0]
ng = self.mixt[0] * _gam_dens(self.shape_n, self.scale_n, - c)
y = self.mixt[1] * _gaus_dens(self.mean, self.var, c)
pg = self.mixt[2] * _gam_dens(self.shape_p, self.scale_p, c)
z = y + pg + ng
        if mpaxes is None:
mp.figure()
ax = mp.subplot(1, 1, 1)
else:
ax = mpaxes
ax.plot(0.5 * (c[1:] + c[:-1]), h / dc, linewidth=2, label='data')
ax.plot(c, ng, 'c', linewidth=2, label='negative gamma component')
ax.plot(c, y, 'r', linewidth=2, label='Gaussian component')
ax.plot(c, pg, 'g', linewidth=2, label='positive gamma component')
ax.plot(c, z, 'k', linewidth=2, label='mixture distribution')
ax.set_title('Fit of the density with a Gamma-Gaussian mixture',
fontsize=12)
l = ax.legend()
for t in l.get_texts():
t.set_fontsize(12)
ax.set_xticklabels(ax.get_xticks(), fontsize=12)
ax.set_yticklabels(ax.get_yticks(), fontsize=12)
| bsd-3-clause |
pbrod/scipy | scipy/spatial/_spherical_voronoi.py | 12 | 11835 | """
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as Scipy.
#
import numpy as np
import numpy.matlib
import scipy
import itertools
from . import _voronoi
__all__ = ['SphericalVoronoi']
def calc_circumcenters(tetrahedrons):
    """ Calculates the circumcenters of the circumspheres of tetrahedrons.
An implementation based on
http://mathworld.wolfram.com/Circumsphere.html
Parameters
----------
tetrahedrons : an array of shape (N, 4, 3)
consisting of N tetrahedrons defined by 4 points in 3D
Returns
----------
circumcenters : an array of shape (N, 3)
consisting of the N circumcenters of the tetrahedrons in 3D
"""
num = tetrahedrons.shape[0]
a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2)
sums = np.sum(tetrahedrons ** 2, axis=2)
d = np.concatenate((sums[:, :, np.newaxis], a), axis=2)
dx = np.delete(d, 1, axis=2)
dy = np.delete(d, 2, axis=2)
dz = np.delete(d, 3, axis=2)
dx = np.linalg.det(dx)
dy = -np.linalg.det(dy)
dz = np.linalg.det(dz)
a = np.linalg.det(a)
nominator = np.vstack((dx, dy, dz))
denominator = 2*a
return (nominator / denominator).T
def project_to_sphere(points, center, radius):
"""
Projects the elements of points onto the sphere defined
by center and radius.
Parameters
----------
points : array of floats of shape (npoints, ndim)
consisting of the points in a space of dimension ndim
center : array of floats of shape (ndim,)
the center of the sphere to project on
radius : float
the radius of the sphere to project on
    Returns
    ----------
    projected_points : array of floats of shape (npoints, ndim)
        the points projected onto the sphere
"""
lengths = scipy.spatial.distance.cdist(points, np.array([center]))
return (points - center) / lengths * radius + center
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, 3)
Coordinates of points to construct a spherical
Voronoi diagram from
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (3,)
Center of sphere (Default: origin)
Attributes
----------
points : double array of shape (npoints, 3)
the points in 3D to generate the Voronoi diagram from
radius : double
radius of the sphere
Default: None (forces estimation, which is less precise)
center : double array of shape (3,)
center of the sphere
Default: None (assumes sphere is centered at origin)
vertices : double array of shape (nvertices, 3)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Notes
----------
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
A 3D Delaunay tetrahedralization is obtained by including the origin of
the coordinate system as the fourth vertex of each simplex of the Convex
Hull. The circumcenters of all tetrahedra in the system are calculated and
projected to the surface of the sphere, producing the Voronoi vertices.
The Delaunay tetrahedralization neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
The surface area of spherical polygons is calculated by decomposing them
into triangles and using L'Huilier's Theorem to calculate the spherical
excess of each triangle [Weisstein]_. The sum of the spherical excesses is
multiplied by the square of the sphere radius to obtain the surface area
of the spherical polygon. For nearly-degenerate spherical polygons an area
of approximately 0 is returned by default, rather than attempting the
unstable calculation.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement). The reconstitution of the surface area of the
sphere, measured as the sum of the surface areas of all Voronoi regions,
is closest to 100 % for larger (>> 10) numbers of generators.
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web
Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
>>> from matplotlib import colors
>>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi
    >>> from mpl_toolkits.mplot3d import proj3d
    >>> import numpy as np
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
>>> center = np.array([0, 0, 0])
>>> radius = 1
>>> # calculate spherical Voronoi diagram
>>> sv = SphericalVoronoi(points, radius, center)
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> # generate plot
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... random_color = colors.rgb2hex(np.random.rand(3))
... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
... polygon.set_color(random_color)
... ax.add_collection3d(polygon)
>>> plt.show()
"""
def __init__(self, points, radius=None, center=None):
"""
Initializes the object and starts the computation of the Voronoi
diagram.
points : The generator points of the Voronoi diagram assumed to be
all on the sphere with radius supplied by the radius parameter and
center supplied by the center parameter.
radius : The radius of the sphere. Will default to 1 if not supplied.
center : The center of the sphere. Will default to the origin if not
supplied.
"""
self.points = points
if np.any(center):
self.center = center
else:
self.center = np.zeros(3)
if radius:
self.radius = radius
else:
self.radius = 1
self.vertices = None
self.regions = None
self._tri = None
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# perform 3D Delaunay triangulation on data set
# (here ConvexHull can also be used, and is faster)
self._tri = scipy.spatial.ConvexHull(self.points)
# add the center to each of the simplices in tri to get the same
# tetrahedrons we'd have gotten from Delaunay tetrahedralization
# tetrahedrons will have shape: (2N-4, 4, 3)
tetrahedrons = self._tri.points[self._tri.simplices]
tetrahedrons = np.insert(
tetrahedrons,
3,
np.array([self.center]),
axis=1
)
# produce circumcenters of tetrahedrons from 3D Delaunay
# circumcenters will have shape: (2N-4, 3)
circumcenters = calc_circumcenters(tetrahedrons)
# project tetrahedron circumcenters to the surface of the sphere
# self.vertices will have shape: (2N-4, 3)
self.vertices = project_to_sphere(
circumcenters,
self.center,
self.radius
)
# calculate regions from triangulation
# simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(self._tri.simplices.shape[0])
# tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices, simplex_indices,
simplex_indices]).ravel()
# point_indices will have shape: (6N-12,)
point_indices = self._tri.simplices.ravel()
# array_associations will have shape: (6N-12, 2)
array_associations = np.dstack((point_indices, tri_indices))[0]
array_associations = array_associations[np.lexsort((
array_associations[...,1],
array_associations[...,0]))]
array_associations = array_associations.astype(np.intp)
# group by generator indices to produce
# unsorted regions in nested list
groups = []
for k, g in itertools.groupby(array_associations,
lambda t: t[0]):
groups.append(list(list(zip(*list(g)))[1]))
self.regions = groups
def sort_vertices_of_regions(self):
"""
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the projected circumcenter of the
tetrahedron obtained by the k-th triangle in _tri.simplices (and the
origin). For each region n, we choose the first triangle (=Voronoi
vertex) in _tri.simplices and a vertex of that triangle not equal to
the center n. These determine a unique neighbor of that triangle,
which is then chosen as the second triangle. The second triangle
will have a unique vertex not equal to the current vertex or the
center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
_voronoi.sort_vertices_of_regions(self._tri.simplices,
self.regions)
| bsd-3-clause |
milankl/swm | docu/matvis/scripts/matvis.py | 1 | 2022 | ## PLOTTING OPERATOR MATRICES
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
import matplotlib.pyplot as plt
from cmocean import cm
# import functions
exec(open(path+'swm_operators.py').read())
exec(open(path+'swm_output.py').read())
param = dict()
param['output'] = 0
param['dat_type'] = np.float32
param['dx'] = 1
param['dy'] = 1
param['nx'] = 3
param['ny'] = 3
param['NT'] = param['nx']*param['ny']
param['Nu'] = (param['nx']-1)*param['ny']
param['Nv'] = (param['ny']-1)*param['nx']
param['Nq'] = (param['nx']+1)*(param['ny']+1)
set_grad_mat()
set_interp_mat()
set_lapl_mat()
set_arakawa_mat()
##
#mnames = ['GTx','GTy','Gux','Guy','Gvx','Gvy','Gqy','Gqx']
#mnames = ['Lu','Lv','LT']
mnames = ['IuT','ITu','IvT','ITv','Iuv','Ivu','ITq','IqT','Iqu','Iuq','Iqv','Ivq']
for mname in mnames:
exec('M = '+mname)
levs = np.sort(np.array(list(set(M.data))))
linlevs = np.arange(1,len(levs)+1)
# replace data by linlevs
idx = []
for l in levs:
idx.append(M.data == l)
for i,r in zip(idx,range(len(idx))):
M.data[i] = linlevs[r]
M = M.todense()
M = np.ma.masked_array(M,mask=(M == 0))
aspectratio = M.shape[0]/M.shape[1]
fig,ax = plt.subplots(1,1,figsize=(6,5*aspectratio))
if len(levs) > 1:
cmapd = cm.thermal.from_list('cmapd',plt.cm.jet(np.linspace(0,1,len(linlevs))),len(linlevs))
else:
cmapd = cm.thermal.from_list('cmapd',plt.cm.gray([0,1]),2)
q = ax.matshow(M,cmap=cmapd,vmin=.5,vmax=linlevs.max()+.5)
cbar = plt.colorbar(q,ax=ax,ticks=linlevs,drawedges=True)
cbar.ax.set_yticklabels(levs)
ax.set_xlabel(r'$\mathbf{'+mname[0]+'}^'+mname[2]+'_'+mname[1]+'$ for $n_x =$%i, $n_y =$%i' % (param['nx'],param['ny']),fontsize=20)
plt.tight_layout()
fig.savefig(path+'matvis/img/'+mname+'.png',dpi=150)
plt.close(fig)
#plt.show() | gpl-3.0 |
albahnsen/ML_SecurityInformatics | notebooks/m22_model_deployment.py | 1 | 1040 | #!/usr/bin/python
import pandas as pd
from sklearn.externals import joblib
import sys
def predict_proba(url):
clf = joblib.load('22_clf_rf.pkl')
url_ = pd.DataFrame([url], columns=['url'])
# Create features
keywords = ['https', 'login', '.php', '.html', '@', 'sign']
for keyword in keywords:
url_['keyword_' + keyword] = url_.url.str.contains(keyword).astype(int)
url_['lenght'] = url_.url.str.len() - 2
domain = url_.url.str.split('/', expand=True).iloc[:, 2]
url_['lenght_domain'] = domain.str.len()
url_['isIP'] = (url_.url.str.replace('.', '') * 1).str.isnumeric().astype(int)
url_['count_com'] = url_.url.str.count('com')
# Make prediction
p1 = clf.predict_proba(url_.drop('url', axis=1))[0,1]
return p1
if __name__ == "__main__":
if len(sys.argv) == 1:
print('Please add an URL')
else:
url = sys.argv[1]
p1 = predict_proba(url)
print(url)
print('Probability of Phishing: ', p1)
| mit |
r-rathi/error-control-coding | perf/plot-pwl.py | 1 | 1595 | import numpy as np
import matplotlib.pyplot as plt
import itertools
from errsim import *
def pw_pl_label(pe, pb, n):
if pb is None:
pb = pe
pwlab = '$p_W(w)$ pe={} n={} BSC'.format(pe, n)
pllab = '$p_L(l)$ pe={} n={} BSC'.format(pe, n)
else:
pwlab = '$p_W(w)$ pe={} n={} pb={}'.format(pe, n, pb)
pllab = '$p_L(l)$ pe={} n={} pb={}'.format(pe, n, pb)
pWL = jointpmf5(pe, pb, n)
pW = pWL.sum(axis=1)
pL = pWL.sum(axis=0)
return pW, pL, pwlab, pllab
def plot_pwl(pe, fpath=None):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
x = np.arange(129)
pW, pL, pwlab, pllab = pw_pl_label(pe, None, 128)
ax.plot(x, pW[x], 'g--', lw=2, label=pwlab)
ax.plot(x, pL[x], 'g-', lw=2, label=pllab)
pW, pL, pwlab, pllab = pw_pl_label(pe, .1, 128)
ax.plot(x, pW[x], 'b--', lw=2, label=pwlab)
ax.plot(x, pL[x], 'b-', lw=2, label=pllab)
pW, pL, pwlab, pllab = pw_pl_label(pe, .5, 128)
ax.plot(x, pW[x], 'r--', lw=2, label=pwlab)
ax.plot(x, pL[x], 'r-', lw=2, label=pllab)
ax.set_yscale('log')
ax.set_xticks(x[::10])
ax.set_xlim(x[0], x[-1])
ax.set_ylim(pe ** (2 * 1.2), 1e-1)
ax.set_xlabel('Error weight, $w$ or Error length, $l$')
ax.set_ylabel('Probability')
ax.set_title('Error weight and length distributions')
ax.legend() #prop={'family': 'monospace'}) #fontsize=12)
ax.grid(True)
if fpath:
fig.savefig(fpath)
plt.show()
plt.close('all')
plot_pwl(1e-15, 'plots/pwl-pe=1e15.png')
plot_pwl(1e-6, 'plots/pwl-pe=1e6.png')
| mit |
zorojean/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
DLTK/DLTK | setup.py | 1 | 2163 | #!/usr/bin/env python
from setuptools import setup, find_packages
__version__ = None
exec(open('dltk/version.py').read())
test_require = ['pytest', 'pytest-flake8', 'pytest-cov', 'python-coveralls']
setup(name='dltk',
version=__version__,
description='Deep Learning Toolkit for Medical Image Analysis',
author='DLTK contributors',
url='https://dltk.github.io',
packages=find_packages(exclude=['docs', 'contrib', 'data', 'examples']),
keywords=['machine learning', 'tensorflow', 'deep learning',
'biomedical imaging'],
license='Apache License 2.0',
classifiers=['Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4'],
install_requires=['numpy>=1.14.0', 'scipy>=0.19.0', 'pandas>=0.19.0',
'matplotlib>=1.5.3', 'future>=0.16.0', 'xlrd>=1.1.0',
'scikit-image>=0.13.0', 'SimpleITK>=1.0.0',
'jupyter>=1.0.0', 'argparse'],
tests_require=test_require,
extras_require={'docs': ['sphinx==1.5.6', 'sphinx-rtd-theme',
'recommonmark', 'sphinx-autobuild',
'sphinxcontrib-versioning'],
'tests': test_require}
)
print("\nWelcome to DLTK!")
print("If any questions please visit documentation page "
"https://dltk.github.io/dltk")
print("or join community chat on https://gitter.im/DLTK/DLTK")
try:
import tensorflow
except ImportError:
print('We did not find TensorFlow on your system. Please install it via '
'`pip install tensorflow-gpu` if you have a CUDA-enabled GPU or with '
'`pip install tensorflow` without GPU support.')
| apache-2.0 |
dhruv13J/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using cross-validation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
NMTHydro/Recharge | utils/jornada_plot.py | 1 | 43872 | # ===============================================================================
# Copyright 2016 gabe-parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
import pandas as pd
import numpy as np
import datetime
from matplotlib import pyplot as plt
from datetime import datetime
# ============= local library imports ===========================
# =================================== run() exclusive functions =============================================
def plot_depths(dates, d30, d60, d90, d110, d130, name, tdate, rzsm, taw, pixel):
""""""
figure_title = "rzsm_5_depths_{}_TAW_{}_pixel_{}".format(name, taw, pixel)
fig = plt.figure()
fig.suptitle("Soil moisture at different depths for {} at TAW = {} corr to pixel {}".format(name, taw, pixel), fontsize=12, fontweight='bold')
plt.rcParams['axes.grid'] = True
p30 = fig.add_subplot(511)
p30.set_title("30cm depth", fontsize=10, fontweight='bold')
# p30.set_xlabel('date', fontsize=8)
p30.set_ylabel('swc')
p30.plot(dates, d30, linewidth=1)
p30.plot(tdate, rzsm, linewidth=1)
p30.plot_date(dates, d30, marker='o', markersize=2)
p60 = fig.add_subplot(512)
p60.set_title("60cm depth", fontsize=10, fontweight='bold')
# p60.set_xlabel('date', fontsize=8)
p60.set_ylabel('swc')
p60.plot(dates, d60, linewidth=1)
p60.plot(tdate, rzsm, linewidth=1)
p60.plot_date(dates, d60, marker='o', markersize=2)
p90 = fig.add_subplot(513)
p90.set_title("90cm depth", fontsize=10, fontweight='bold')
# p90.set_xlabel('date', fontsize=8)
p90.set_ylabel('swc')
p90.plot(dates, d90, linewidth=1)
p90.plot(tdate, rzsm, linewidth=1)
p90.plot_date(dates, d90, marker='o', markersize=2)
p110 = fig.add_subplot(514)
p110.set_title("110cm depth", fontsize=10, fontweight='bold')
# p110.set_xlabel('date', fontsize=8)
p110.set_ylabel('swc')
p110.plot(dates, d110, linewidth=1)
p110.plot(tdate, rzsm, linewidth=1)
p110.plot_date(dates, d110, marker='o', markersize=2)
# p110.grid()
p130 = fig.add_subplot(515)
p130.set_title("130cm depth", fontsize=10, fontweight='bold')
# p130.set_xlabel('date', fontsize=8)
p130.set_ylabel('swc')
p130.plot(dates, d130, linewidth=1)
p130.plot(tdate, rzsm, linewidth=1)
p130.plot_date(dates, d130, marker='o', markersize=2)
# p130.grid()
# plt.tight_layout()
# plt.subplots_adjust(top=0.89)
plt.subplots_adjust(hspace=.5)
# plt.show()
plt.savefig("/Users/Gabe/Desktop/juliet_stuff/jornada_plot_output/{}.pdf".format(figure_title))
plt.close(fig)
def make_set(df):
""""""
location_list = []
locations = df['location']
for i in locations:
# print "location", i
if i.startswith("C"):
location_list.append(i)
location_set = set(location_list)
# print "the location set", location_set
## for str in ["DRY", "WET", "STD"]:
## location_set.remove(str)
return location_set
def build_jornada_etrm():
""""""
relate_dict = {"000": ["C01", "C02"], "001": ["C03", "C04", "C05", "C06", "C07", "C08", "C09"],
"002": ["C10", "C11"], "003": ["C12", "C13", "C14", "C15", "C16", "C17", "C18", "C19", "C20", "C21"],
"004": ["C22", "C23", "C24", "C25", "C26", "C27", "C28", "C29"],
"005": ["C31", "C32", "C33", "C34", "C35", "C36", "C37", "C38", "C39"],
"006": ["C40", "C41", "C42", "C43", "C44", "C45", "C46", "C47", "C48"],
"007": ["C51", "C52", "C53", "C54", "C55", "C56", "C57"],
"008": ["C58", "C59", "C60", "C61", "C62", "C63", "C64", "C65", "C66"],
"009": ["C67", "C68", "C69", "C70"], "010": ["C71", "C72", "C73", "C74", "C75"],
"011": ["C76", "C77", "C78", "C79", "C80", "C81", "C82", "C83", "C84"],
"012": ["C85", "C86", "C87", "C88", "C89"]}
jornada_etrm = {}
for key, value in relate_dict.iteritems():
if len(value)>0:
for k in value:
jornada_etrm[k] = key
# print "the jornada etrm", jornada_etrm
return jornada_etrm
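# Illustrative sketch (not called anywhere in this module): the inversion above
# means each probe label keys the ETRM pixel that contains it, e.g. both "C01"
# and "C02" come back as pixel "000".
def _example_build_jornada_etrm():
    lookup = build_jornada_etrm()
    assert lookup["C01"] == "000"
    assert lookup["C10"] == "002"
    return lookup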
def nrml_rzsm(data, df_long):
"""
Normalize the volumetric soil water content into a RZSM by taking the Min - Max and normalizing on a scale of zero
to one.
:param data: A shorter dataset we need to normalize to min and max in order to plot.
:param df_long: A longer dataset that contains a lower min and higher max than the dataset we end up plotting
:return: normalized dataset
"""
print 'length of data', len(data)
print 'length of data long', len(df_long)
# convert from strings to float
data = [float(i) for i in data]
data_long = [float(i) for i in df_long]
# Get min and max from a longer dataset
ma = max(data_long)
print "ma", ma
mi = min(data_long)
print "mi", mi
# normalized scale
n0 = 0
n1 = 1
# create a new normalized dataset
nrml_data = [n0 + (value - mi)/(ma - mi) for value in data]
print "lenght of normalized data", len(nrml_data)
return nrml_data
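# Illustrative sketch (not called anywhere in this module): nrml_rzsm() scales a
# short record with the min/max taken from a longer record, so for a hypothetical
# long record spanning 0.05-0.20 the value 0.05 maps to 0.0 and 0.20 maps to 1.0.
def _example_nrml_rzsm():
    short_record = ["0.08", "0.20"]
    long_record = ["0.05", "0.08", "0.12", "0.20"]
    scaled = nrml_rzsm(short_record, long_record)
    # (0.08 - 0.05) / (0.20 - 0.05) = 0.2 and (0.20 - 0.05) / 0.15 = 1.0
    assert abs(scaled[0] - 0.2) < 1e-9
    assert abs(scaled[1] - 1.0) < 1e-9
    return scaled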
def run():
"""
Get the Jornada data and for each gauge, plot a subplot for a different depth,
i.e for three depths its a three subplot plot.
:return:
"""
# TODO - Make an output text file that records the diff between min and max soil water content for each trans. loc.
#====== Tracker =======
# # path to a tracker file
# tracker_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/taw_295/etrm_tracker_000.csv"
# # get the main dataframe
# df_tracker = pd.read_csv(tracker_path)
# # print "df_tracker\n", df_tracker
# # we need to get rzsm and dates
# tdate = pd.to_datetime(df_tracker['Date'])
# # print 'tdate\n', tdate
# rzsm = df_tracker['rzsm']
# # print 'rzsm\n', rzsm
#====== Jornada =======
# path to the jornada data
path = "/Users/Gabe/Desktop/33_37_ETRM_aoi_project/Jornada_012002_transect_soil_water_content_data/" \
"Jornada_012002_transect_soil_water_content_data.csv" # This version has data that goes up through 2015
df = pd.read_csv(path, header=72) # I have no idea why it works on 72 but whatever.
# print "the df \n", df
# print "df['Date'] \n", df['date']
# filter out missing data "."
df = df[df['swc_30cm'] != "."] #, 'swc_60cm', 'swc_110cm', 'swc_130cm'
df = df[df['swc_60cm'] != "."]
df = df[df['swc_90cm'] != "."]
df = df[df['swc_110cm'] != "."]
df = df[df['swc_130cm'] != "."]
# # Cut off extraneous dates we don't need...
df_long = df[df.index > 15000] # 32000 <- use for plotting
df = df[df.index > 32000]
# +=+=+=+=+=+= Automatic Plotter mode +=+=+=+=+=+=
# set TAW
taw = 115
# set tracker path
tracker_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/taw_{}".format(taw)
# print tracker_path
tracker_path_dict = {}
for path, directories, files in os.walk(tracker_path):
for i in files:
# print "file -> ", i
if len(i) == 20:
name = i[13:-4]
else:
name = i[13:-9]
# print "name", name
csv_path = os.path.join(path, i)
tracker_path_dict[name] = csv_path
print "tracker path dictionary \n", tracker_path_dict['001']
# Build the jornada ETRM dictionary relating every transect measurement point to a ETRM pixel.
jornada_etrm = build_jornada_etrm() #location_set, tracker_path_dict
# TODO - MIN MAX output function
# create a file at a place
min_max_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/min_max.txt"
with open(min_max_path, "w") as created_file:
created_file.write("\n -------------- \n MIN and MAX volumetric soil moisture for Jornada neutron probe data"
" \n -------------- \n")
# within the loop open the file in append mode (a)
for key, value in jornada_etrm.iteritems():
print "key -> ", key
print "value -> ", value
#===== TRACKER ======
df_tracker = pd.read_csv(tracker_path_dict[value])
# we need to get rzsm and dates
tdate = pd.to_datetime(df_tracker['Date'])
# print 'tdate\n', tdate
rzsm = df_tracker['rzsm']
# print 'rzsm\n', rzsm
pixel = value
# ===== Jornada ========
jornada_var = df[df['location'] == key]
# a long version of the jornada dataset to get a more accurate min and max from the whole dataset to perform
# the normalization with
jornada_var_long = df_long[df_long['location'] == key]
# ===== Append min and max to min_max.txt ========
# write out the key and value, key = probe, value = pixel
with open(min_max_path, 'a') as append_file:
append_file.write(" \n ====== \n probe {} / pixel {} \n====== \n".format(key, value))
# deal with all the separate depths and report them separately
list_of_codes = ['swc_30cm', 'swc_60cm', 'swc_90cm', 'swc_110cm', 'swc_130cm']
for code in list_of_codes:
jor_var_long = jornada_var_long[code]
jor_var_long = [float(i) for i in jor_var_long]
# Get min and max from a longer dataset
ma = max(jor_var_long)
mi = min(jor_var_long)
diff = ma - mi
# write the min and max underneath a code in the min_max.txt code
with open(min_max_path, 'a') as append_file:
append_file.write("\n ****** \n depth: {} \n min: {} \n max: {} "
"\n diff btwn max and min: {} \n \n".format(code, ma, mi, diff))
# ===== Depths ========
# 30 cm depth
j_30 = np.array(nrml_rzsm(jornada_var['swc_30cm'], jornada_var_long['swc_30cm']))
# convert to a float
# j_30 = j_30.astype(np.float)
# 60cm
j_60 = np.array(nrml_rzsm(jornada_var['swc_60cm'], jornada_var_long['swc_60cm']))
# j_60 = j_60.astype(np.float)
# 90cm
j_90 = np.array(nrml_rzsm(jornada_var['swc_90cm'], jornada_var_long['swc_90cm']))
# print "here is j_90 -> {}".format(j_90)
# j_90 = j_90.astype(np.float)
# 110cm
j_110 = np.array(nrml_rzsm(jornada_var['swc_110cm'], jornada_var_long['swc_110cm']))
# j_110 = j_110.astype(np.float)
# 130cm
j_130 = np.array(nrml_rzsm(jornada_var['swc_130cm'], jornada_var_long['swc_130cm']))
# j_130 = j_130.astype(np.float)
# get the date...
j_date = pd.to_datetime(jornada_var['date'])
j_name = key
plot_depths(j_date, j_30, j_60, j_90, j_110, j_130, j_name, tdate, rzsm, taw, pixel)
# todo - write a function that builds the jornada_etrm dict you are using for plotting
# todo - give plot_depths some alternative MODES that allow a comparison with all five depths or just with
# 30 cm
# todo - make another plotting function that plots all the associated jornada and etrm pixels together
# cumulatively on the same figure..
# todo - make a script that does all the TAWs plotted cumulatively...
# =================================== find_error() exclusive functions =============================================
def nrml_rzsm_for_error(data):
"""
Normalize the volumetric soil water content into a RZSM by taking the Min - Max and normalizing on a scale of zero
to one.
:return: normalized dataset
"""
print 'length of data', len(data)
print "DATA", data
# Get min and max from a long dataset
ma = max(data)
print "ma", ma
mi = min(data)
print "mi", mi
# normalized scale
n0 = 0
n1 = 1
# create a new normalized dataset
nrml_data = [n0 + ((value - mi) * (n1 - n0)) / (ma - mi) for value in data]
print "lenght of normalized data", len(nrml_data)
print "actual normal data array", nrml_data
return nrml_data
def float_data(data):
data = [float(i) for i in data]
return data
def depth_average(j_30, j_60, j_90, j_110, j_130):
"""
:param j_30: time series of 30m vol soil moisture data
:param j_60: time series of 60m vol soil moisture data
:param j_90: time series of 90m vol soil moisture data
:param j_110: time series of 110m vol soil moisture data
:param j_130: time series of 130m vol soil moisture data
:return: depth averaged vol soil moisture to be normalized.
"""
javg_lst = []
for j3, j6, j9, j11, j13 in zip(j_30, j_60, j_90, j_110, j_130):
# multiply each probe measurement by a depth weighting term and get the average of all depth weighted values
# 30cm(0-45), 60cm(45-75), 90cm(75-100), 110cm(100-120), 130cm(120-150) <- Depth weighting of the probes
print "values {} {} {} {} {}".format(j3, j6, j9, j11, j13)
# print "numerator", ((j3 * float(45/150)) + (j6 * float(30/150)) + (j9 * float(25/150)) + (j11 * float(20/150)) + (j13 * float(30/150)))
#
# print "numerator mod", ((j3) + (j6 ) + (j9) + (j11) + ( j13))
# print "numerator mod 2", (
# (j3 * (45)) + (j6 * (30 )) + (j9 * (25)) + (j11 * (20)) + (
# j13 * float(30)))
# TODO - Clean this up and make sure d_avg is correct.
d_avg = ((j3 * (45)) + (j6 * (30)) + (j9 * (25)) + (j11 * (20)) + (j13 * (30))) / 150.0
# j_avg = ((j3 * float(45/150)) + (j6 * float(30/150)) + (j9 * float(25/150)) + (j11 * float(20/150)) +
# (j13 * float(30/150))) / 5.0
javg_lst.append(d_avg)
print "j avg list", javg_lst
return javg_lst
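# Illustrative sketch (not called anywhere in this module): the weights above are
# the layer thicknesses 45 + 30 + 25 + 20 + 30 = 150 cm, so a hypothetical uniform
# profile of theta = 0.1 depth-averages to exactly 0.1.
def _example_depth_average():
    uniform = depth_average([0.1], [0.1], [0.1], [0.1], [0.1])
    # (0.1 * 45 + 0.1 * 30 + 0.1 * 25 + 0.1 * 20 + 0.1 * 30) / 150.0 = 0.1
    assert abs(uniform[0] - 0.1) < 1e-9
    return uniform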
def find_error():
"""
1.) depth average all the jornada data
2.) Convert the soil moisture values into a relative soil moisture condition
3.) On a per_pixel basis: Get the average, and std deviation and variability
4.) print that to a textfile or something.
:return:
"""
# ====== Jornada =======
# path to the jornada data
path = "/Users/Gabe/Desktop/33_37_ETRM_aoi_project/Jornada_012002_transect_soil_water_content_data/" \
"Jornada_012002_transect_soil_water_content_data.csv" # This version has data that goes up through 2015
df = pd.read_csv(path, header=72) # I have no idea why it works on 72 but whatever.
# print "the df \n", df
# print "df['Date'] \n", df['date']
# filter out missing data "."
df = df[df['swc_30cm'] != "."] # , 'swc_60cm', 'swc_110cm', 'swc_130cm'
df = df[df['swc_60cm'] != "."]
df = df[df['swc_90cm'] != "."]
df = df[df['swc_110cm'] != "."]
df = df[df['swc_130cm'] != "."]
# # Cut off extraneous dates we don't need...
df_long = df[df.index > 15000] # 32000 <- use for plotting
df = df[df.index > 32000]
# dictionary that relates each pixel to the correct jornada stations.
relate_dict = {"000": ["C01", "C02"], "001": ["C03", "C04", "C05", "C06", "C07", "C08", "C09"],
"002": ["C10", "C11"], "003": ["C12", "C13", "C14", "C15", "C16", "C17", "C18", "C19", "C20", "C21"],
"004": ["C22", "C23", "C24", "C25", "C26", "C27", "C28", "C29"],
"005": ["C31", "C32", "C33", "C34", "C35", "C36", "C37", "C38", "C39"],
"006": ["C40", "C41", "C42", "C43", "C44", "C45", "C46", "C47", "C48"],
"007": ["C51", "C52", "C53", "C54", "C55", "C56", "C57"],
"008": ["C58", "C59", "C60", "C61", "C62", "C63", "C64", "C65", "C66"],
"009": ["C67", "C68", "C69", "C70"], "010": ["C71", "C72", "C73", "C74", "C75"],
"011": ["C76", "C77", "C78", "C79", "C80", "C81", "C82", "C83", "C84"],
"012": ["C85", "C86", "C87", "C88", "C89"]}
# dictionary that relates each jornada station to the correct pixel.
jornada_etrm = build_jornada_etrm()
# loop through the jornada_etrm dictionary to get the proper Jornada data locations while tracking the ETRM pixel.
avg_normal_dictionary = {}
for key, value in jornada_etrm.iteritems():
# print "key -> ", key
# print "value -> ", value
# ===== Jornada ========
jornada_var = df[df['location'] == key]
# a long version of the jornada dataset to get a more accurate min and max from the whole dataset to perform
# the normalization with
jornada_var_long = df_long[df_long['location'] == key]
# you only want to use the long jornada var...
# print "Jornada VAR", jornada_var_long
# ===== Depths (FIND ERROR) <- Don't normalize the data yet ========
# 30 cm depth
j_30 = float_data(jornada_var_long['swc_30cm'])
# convert to a float
# j_30 = j_30.astype(np.float)
# 60cm
j_60 = float_data(jornada_var_long['swc_60cm'])
# j_60 = j_60.astype(np.float)
# 90cm
j_90 = float_data(jornada_var_long['swc_90cm'])
# print "here is j_90 -> {}".format(j_90)
# j_90 = j_90.astype(np.float)
# 110cm
j_110 = float_data(jornada_var_long['swc_110cm'])
# j_110 = j_110.astype(np.float)
# 130cm
j_130 = float_data(jornada_var_long['swc_130cm'])
# j_130 = j_130.astype(np.float)
# # get the date...
# j_date = pd.to_datetime(jornada_var['date'])
j_name = key
# depth average
j_avg = depth_average(j_30, j_60, j_90, j_110, j_130)
# normalize
jornada_avg_nrml = nrml_rzsm_for_error(j_avg)
# print "length of the depth avg {}".format(len(jornada_avg_nrml))
# add the normalized depth averaged value for each location into a new dictionary:
avg_normal_dictionary[key] = jornada_avg_nrml
# now we need to go through the relate dict to get the sigma, variability and average for each ETRM pixel
# print "relate dict \n {}".format(relate_dict)
# print "average_normal_dict \n {}".format(avg_normal_dictionary)
related_stats_dict = {}
for key, value in relate_dict.iteritems():
if len(value) > 0:
_loc_dict = {}
for loc in value:
# the depth averaged, normalized, time series for a given location
avg_nrml = np.array(avg_normal_dictionary[loc])
# the location average
loc_avg = np.average(avg_nrml)
# print "location {}'s average -> {}".format(loc, loc_avg)
# the location variance
loc_var = np.var(avg_nrml)
# print "location {}'s variance -> {}".format(loc, loc_var)
# the location std dev
loc_std = np.std(avg_nrml)
# print "location {}'s std deviatin -> {}".format(loc, loc_std)
stats = (loc_avg, loc_var, loc_std)
_loc_dict[loc] = stats
related_stats_dict[key] = _loc_dict
print "updated related statistics dictionary \n {}".format(related_stats_dict)
### find the Standard error of the mean for each pixel
std_err_dict = {}
for key, value in related_stats_dict.iteritems():
print "pixel {}".format(key)
# start a count to count the tubes in a given pixel.
num_tubes = 0
# ned to capture the averages
tube_avgs = []
for k, v in value.iteritems():
# add the average into the list
tube_avgs.append(v[0])
# count the tube
num_tubes += 1
# take the standard deviation of the tube averages
tube_avgs = np.array(tube_avgs)
tube_std = np.std(tube_avgs)
# std error of the mean
sem = tube_std / (float(num_tubes) ** 0.5)
# add to dictionary
std_err_dict[key] = sem
### find the average of averages for each pixel
avg_of_avgs = {}
for key, value in related_stats_dict.iteritems():
# start a count to count the tubes in a given pixel.
num_tubes = 0
# ned to capture the averages
tube_avgs = []
for k, v in value.iteritems():
# add the average into the list
tube_avgs.append(v[0])
# count the tube
num_tubes += 1
# add up the tube averages
sum_avgs = np.array(tube_avgs)
sum_avgs = np.sum(sum_avgs)
# divide by the number of tubes
avg_avg = sum_avgs/float(num_tubes)
# add to dictionary
avg_of_avgs[key] = avg_avg
# +=+=+=+=+=+=+=+=+=+=+=+= AGGREGATE and OUTPUT +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=
# create a file at a place
stats_output_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/stats_output_250m.txt"
with open(stats_output_path, "w") as created_file:
created_file.write("\n ------------------------------------- \n "
"AVG, VARIANCE and STD DEV for Jornada neutron probe data"
" \n ------------------------------------- \n")
for key, value in related_stats_dict.iteritems():
with open(stats_output_path, 'a') as append_file:
append_file.write("\n \n \n ****** \n STATS for PIXEL {} \n ****** \n \n ".format(key))
for k, v in value.iteritems():
with open(stats_output_path, 'a') as append_file:
append_file.write("\n \n === \n LOCATION {} \n === \n \
\n time_depth_average {} \n variance_depth_average {} \n"
" std_dev_depth_average {} ".format(k, v[0], v[1], v[2]))
with open(stats_output_path, "a") as append_file:
append_file.write("\n \n \n ----------------------------------------------"
" \n Standard Error of the Mean per ETRM Pixel "
"\n ---------------------------------------------- \n")
for key, value in std_err_dict.iteritems():
with open(stats_output_path, 'a') as append_file:
append_file.write("\n \n ****** \n STD ERR for PIXEL {} is -> {} \n ****** \n \n ".format(key, value))
with open(stats_output_path, "a") as append_file:
append_file.write("\n \n \n ----------------------------------------------"
" \n Average of Averages for each pixel"
"\n ---------------------------------------------- \n")
for key, value in avg_of_avgs.iteritems():
with open(stats_output_path, 'a') as append_file:
append_file.write("\n \n ****** \n AVG of AVGs for PIXEL {} is -> {} \n ****** \n \n ".format(key, value))
# =================================== storage_plot() exclusive functions =============================================
def plot_storage(total_storage, taw, dates, j_name, mi, ma, tdate, rzsm, pixel, etrm_taw):
""""""
# fig, ax = plt.subplots(figsize=(6,6))
#
# print "fig {}".format(fig)
# print "ax {}".format(ax)
rel_storage = [(storage-mi)/taw for storage in total_storage]
# for storage in total_storage:
# storage/taw
figure_title = "Relative Storage_location-{}_taw-{}_pixel-{}".format(j_name, etrm_taw, pixel)
fig = plt.figure()
# fig.suptitle("Soil moisture at different depths for {} at TAW = {} corr to pixel {}".format(name, taw, pixel), fontsize=12, fontweight='bold')
plt.rcParams['axes.grid'] = True
aa = fig.add_subplot(111)
aa.set_title("Root Zone Water Fraction for a total 150 cm depth: location-{}, "
"taw-{}, pixel- {}".format(j_name, etrm_taw, pixel), fontsize=10, fontweight='bold')
aa.set_xlabel('date', fontsize=12)
aa.set_ylabel('RZWF', fontsize = 12) #= (storage - minimum storage)/(max storage - min storage)
aa.plot(dates, rel_storage, linewidth=2)
aa.plot(tdate, rzsm, linewidth=2)
aa.plot_date(dates, rel_storage, marker='o', markersize=4)
# change the axes
plt.xlim(datetime.strptime("01/01/2000", "%m/%d/%Y"), datetime.strptime("01/01/2012", "%m/%d/%Y"))
plt.tight_layout()
# plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=.7, wspace=0.2, hspace=0.2)
# plt.subplots_adjust(top=.85)
# plt.show()
plt.savefig("/Users/Gabe/Desktop/juliet_stuff/jornada_plot_output/{}.pdf".format(figure_title))
plt.close(fig)
def calc_storage(j_30, j_60, j_90, j_110, j_130):
"""
:param j_30:
:param j_60:
:param j_90:
:param j_110:
:param j_130:
:return:
"""
j_storage_lst = []
for j3, j6, j9, j11, j13 in zip(j_30, j_60, j_90, j_110, j_130):
# multiply each probe vol soil moisture measurement by a depth
# 30cm(0-45), 60cm(45-75), 90cm(75-100), 110cm(100-120), 130cm(120-150) <- Depth weighting of the probes
storage = ((j3 * (45)) + (j6 * (30)) + (j9 * (25)) + (j11 * (20)) + (j13 * (30)))
j_storage_lst.append(storage)
# print "values {} {} {} {} {}".format(j3, j6, j9, j11, j13)
# print "list of storage -> {}".format(j_storage_lst)
return j_storage_lst
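# Illustrative sketch (not called anywhere in this module): total storage is the
# sum of theta times layer thickness over the 150 cm profile, so a hypothetical
# uniform theta of 0.1 holds 15 cm of water and TAW follows as max - min.
def _example_calc_storage_taw():
    storage = calc_storage([0.1, 0.2], [0.1, 0.2], [0.1, 0.2], [0.1, 0.2], [0.1, 0.2])
    # 0.1 * 150 = 15 cm of water, 0.2 * 150 = 30 cm of water
    taw = max(storage) - min(storage)
    assert abs(storage[0] - 15.0) < 1e-9
    assert abs(taw - 15.0) < 1e-9
    return storage, taw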
def storage_plot():
"""
1.) storage in each layer [theta * depth o' layer] <- time series
2.) Add up storages to get TOTAL STORAGE <- time series
3.) Get max and min of TOTAL STORAGE time series --> Max-Min = TAW
4.) Plot storage/TAW over time for each location
5.) Plot TAW over distance along transect
:return:
"""
# ====== Jornada =======
# path to the jornada data
path = "/Users/Gabe/Desktop/33_37_ETRM_aoi_project/Jornada_012002_transect_soil_water_content_data/" \
"Jornada_012002_transect_soil_water_content_data.csv" # This version has data that goes up through 2015
df = pd.read_csv(path, header=72) # I have no idea why it works on 72 but whatever.
# print "the df \n", df
# print "df['Date'] \n", df['date']
# filter out missing data "."
df = df[df['swc_30cm'] != "."] # , 'swc_60cm', 'swc_110cm', 'swc_130cm'
df = df[df['swc_60cm'] != "."]
df = df[df['swc_90cm'] != "."]
df = df[df['swc_110cm'] != "."]
df = df[df['swc_130cm'] != "."]
# # Cut off extraneous dates we don't need...
df_long = df[df.index > 15000] # 32000 <- use for plotting
df = df[df.index > 32000]
# +=+=+=+=+=+= Automatic Plotter mode +=+=+=+=+=+=
# set TAW
etrm_taw = 70
# set tracker path
tracker_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/taw_{}".format(etrm_taw)
# print tracker_path
tracker_path_dict = {}
for path, directories, files in os.walk(tracker_path):
for i in files:
# print "file -> ", i
if len(i) == 20:
name = i[13:-4]
else:
name = i[13:-9]
# print "name", name
csv_path = os.path.join(path, i)
tracker_path_dict[name] = csv_path
# dictionary that relates each jornada station to the correct pixel.
jornada_etrm = build_jornada_etrm()
# loop through the jornada_etrm dictionary to get the proper Jornada data locations while tracking the ETRM pixel.
storage_taw_dict = {}
for key, value in jornada_etrm.iteritems():
# print "key -> ", key
# print "value -> ", value
# ===== Jornada ========
jornada_var = df[df['location'] == key]
# a long version of the jornada dataset to get a more accurate min and max from the whole dataset to perform
# the normalization with
jornada_var_long = df_long[df_long['location'] == key]
# you only want to use the long jornada var...
# print "Jornada VAR", jornada_var_long
# ===== TRACKER ======
df_tracker = pd.read_csv(tracker_path_dict[value])
# we need to get rzsm and dates
tdate = pd.to_datetime(df_tracker['Date'])
# print 'tdate\n', tdate
rzsm = df_tracker['rzsm']
# print 'rzsm\n', rzsm
pixel = value
# ===== Depths (plot storage) ========
# 30 cm depth
j_30 = float_data(jornada_var_long['swc_30cm'])
# 60cm
j_60 = float_data(jornada_var_long['swc_60cm'])
# 90cm
j_90 = float_data(jornada_var_long['swc_90cm'])
# 110cm
j_110 = float_data(jornada_var_long['swc_110cm'])
# 130cm
j_130 = float_data(jornada_var_long['swc_130cm'])
# get the date...
j_date = pd.to_datetime(jornada_var_long['date'])
j_name = key
# time series of storage
total_storage = calc_storage(j_30, j_60, j_90, j_110, j_130)
# get the TAW
# minimum
mi = min(total_storage)
print "this is the min {}".format(mi)
# maximum
ma = max(total_storage)
print "this is the max {}".format(ma)
taw = ma - mi
print "This is the TAW {}".format(taw)
# storage_taw_dict[key] = (total_storage, taw)
plot_storage(total_storage, taw, j_date, j_name, mi, ma, tdate, rzsm, pixel, etrm_taw)
# print "total storage and taw dictionary -> {}".format(storage_taw_dict)
# ========================= storage_plot_mod() exclusive functions =============================
def plot_storage_simple(dates, storage, storage_name, loc):
figure_title = "Storage for location - {} for assumed Storage Depth - {}".format(loc, storage_name)
fig = plt.figure()
# fig.suptitle("Soil moisture at different depths for {} at TAW = {} corr to pixel {}".format(name, taw, pixel), fontsize=12, fontweight='bold')
plt.rcParams['axes.grid'] = True
aa = fig.add_subplot(111)
aa.set_title(figure_title, fontsize=10, fontweight='bold')
aa.set_xlabel('date', fontsize=12)
aa.set_ylabel('Storage cm', fontsize=12) # = (storage - minimum storage)/(max storage - min storage)
aa.plot(dates, storage, linewidth=2)
aa.plot_date(dates, storage, marker='o', markersize=4)
# # change the axes
# plt.xlim(datetime.strptime("01/01/2000", "%m/%d/%Y"), datetime.strptime("01/01/2012", "%m/%d/%Y"))
plt.tight_layout()
# plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=.7, wspace=0.2, hspace=0.2)
# plt.subplots_adjust(top=.85)
plt.show()
def calc_storage_mod(swc):
"""
:param swc: dataframe rows for one transect location holding the swc_*cm columns
:return: list of cumulative storages for assumed profile depths of 30, 60, 90, 110 and 130 cm
"""
depths = ('30', '60', '90', '110', '130')
depth_vals = {}
for d in depths:
moisture_depth = swc['swc_{}cm'.format(d)]
depth_vals[d] = moisture_depth
# print "Depth vals dict \n {}".format(depth_vals)
lifts = (45.0, 30.0, 25.0, 20.0, 25.0)
# get 30 cm storage
stor_30 = np.array(float_data(depth_vals['30'])) * lifts[0]
# get 60cm storage
stor_60 = 0
for i in range(0, 2):
stor_60 += np.array(float_data(depth_vals[depths[i]])) * lifts[i]
# get 90 cm storage
stor_90 = 0
for i in range(0, 3):
stor_90 += np.array(float_data(depth_vals[depths[i]])) * lifts[i]
# get 110 cm storage
stor_110 = 0
for i in range(0, 4):
stor_110 += np.array(float_data(depth_vals[depths[i]])) * lifts[i]
# get 130 cm storage
stor_130 = 0
for i in range(0, 5):
stor_130 += np.array(float_data(depth_vals[depths[i]])) * lifts[i]
return [stor_30, stor_60, stor_90, stor_110, stor_130]
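# Illustrative sketch (not called anywhere in this module): calc_storage_mod()
# accumulates storage from the surface down, so for a hypothetical uniform
# theta of 0.1 the storage over the first 45 cm lift is 0.1 * 45 = 4.5 cm and the
# next cumulative total is 0.1 * (45 + 30) = 7.5 cm, with deeper totals growing accordingly.
def _example_calc_storage_mod():
    swc = pd.DataFrame({"swc_{}cm".format(d): ["0.1", "0.1"]
                        for d in ("30", "60", "90", "110", "130")})
    storages = calc_storage_mod(swc)
    assert abs(storages[0][0] - 4.5) < 1e-9
    assert abs(storages[1][0] - 7.5) < 1e-9
    return storages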
def storage_plot_mod():
"""
Plot the storage time series for every transect tube at each assumed storage depth.
:return:
"""
# ====== Jornada =======
# path to the jornada data
path = "/Users/Gabe/Desktop/33_37_ETRM_aoi_project/Jornada_012002_transect_soil_water_content_data/" \
"Jornada_012002_transect_soil_water_content_data.csv" # This version has data that goes up through 2015
df = pd.read_csv(path, header=72) # I have no idea why it works on 72 but whatever.
# print "the df \n", df
# print "df['Date'] \n", df['date']
# filter out missing data "."
df = df[df['swc_30cm'] != "."] # , 'swc_60cm', 'swc_110cm', 'swc_130cm'
df = df[df['swc_60cm'] != "."]
df = df[df['swc_90cm'] != "."]
df = df[df['swc_110cm'] != "."]
df = df[df['swc_130cm'] != "."]
# # Cut off extraneous dates we don't need...
df_long = df[df.index > 15000] # 32000 <- use for plotting
df = df[df.index > 32000]
for i in range(1, 90):
key = "C{:02d}".format(i)
# print "Key {}".format(key)
swc = df_long[df_long['location'] == key]
print "SWC \n", swc
# get five sets of storages from the swc measurements for each tube...
storages = calc_storage_mod(swc)
storage_names = ['stor_30', 'stor_60', 'stor_90', 'stor_110', 'stor_130']
# to plot, link up the storages with the dates...
dates = pd.to_datetime(swc['date'])
for storage, name in zip(storages, storage_names):
plot_storage_simple(dates, storage, name, key)
# TODO 1) Set up to plot all three storages on the same plot
# TODO 2) Set up to plot ETrF alongside the storages...(seems like it could be involved).
# # =================================== find_std_error() exclusive functions =============================================
#
# def nrml_rzsm_for_error(data):
# """
# Normalize the volumetric soil water content into a RZSM by taking the Min - Max and normalizing on a scale of zero
# to one.
#
# :return: normalized dataset
# """
#
# # print 'length of data', len(data)
# #
# # print "DATA", data
#
# # Get min and max from a long dataset
# ma = max(data)
# # print "ma", ma
# mi = min(data)
# # print "mi", mi
#
# # normalized scale
# n0 = 0
# n1 = 1
#
# # create a new normalized dataset
# nrml_data = [n0 + ((value - mi) * (n1 - n0)) / (ma - mi) for value in data]
# # print "lenght of normalized data", len(nrml_data)
# # print "actual normal data array", nrml_data
#
# return nrml_data
#
# def float_data(data):
# data = [float(i) for i in data]
# return data
#
# def depth_average(j_30, j_60, j_90, j_110, j_130):
# """
#
# :param j_30: time series of 30m vol soil moisture data
# :param j_60: time series of 60m vol soil moisture data
# :param j_90: time series of 90m vol soil moisture data
# :param j_110: time series of 110m vol soil moisture data
# :param j_130: time series of 130m vol soil moisture data
# :return: depth averaged vol soil moisture to be normalized.
# """
# javg_lst = []
# for j3, j6, j9, j11, j13 in zip(j_30, j_60, j_90, j_110, j_130):
# # multiply each probe measurement by a depth weighting term and get the average of all depth weighted values
# # 30cm(0-45), 60cm(45-75), 90cm(75-100), 110cm(100-120), 130cm(120-150) <- Depth weighting of the probes
#
# # print "values {} {} {} {} {}".format(j3, j6, j9, j11, j13)
#
# # print "numerator", ((j3 * float(45/150)) + (j6 * float(30/150)) + (j9 * float(25/150)) + (j11 * float(20/150)) + (j13 * float(30/150)))
# #
# # print "numerator mod", ((j3) + (j6 ) + (j9) + (j11) + ( j13))
# # print "numerator mod 2", (
# # (j3 * (45)) + (j6 * (30 )) + (j9 * (25)) + (j11 * (20)) + (
# # j13 * float(30)))
# # TODO - Clean this up and make sure d_avg is correct.
# d_avg = ((j3 * (45)) + (j6 * (30)) + (j9 * (25)) + (j11 * (20)) + (j13 * (30))) / 150.0
#
# # j_avg = ((j3 * float(45/150)) + (j6 * float(30/150)) + (j9 * float(25/150)) + (j11 * float(20/150)) +
# # (j13 * float(30/150))) / 5.0
# javg_lst.append(d_avg)
#
# # print "j avg list", javg_lst
#
# return javg_lst
#
#
# def find_std_error():
# """
# 1.) depth average all the jornada data
# 2.) Convert the soil moisture values into a relative soil moisture condition
# 3.) On a per_pixel basis: Get the average, and std deviation and variability
# 4.) print that to a csv
# :return:
# """
#
# # TODO - Write a routine to format the dataframe into
#
# # ====== Jornada =======
# # path to the jornada data
# path = "/Users/Gabe/Desktop/33_37_ETRM_aoi_project/Jornada_012002_transect_soil_water_content_data/" \
# "Jornada_012002_transect_soil_water_content_data.csv" # This version has data that goes up through 2015
#
# df = pd.read_csv(path, header=72) # I have no idea why it works on 72 but whatever.
#
# # print "the df \n", df
# # print "df['Date'] \n", df['date']
#
# # filter out missing data "."
# df = df[df.swc_30cm != "."] # , 'swc_60cm', 'swc_110cm', 'swc_130cm' | df.swc_60cm | df.swc_90cm | df.swc_110cm | df.swc_130cm
# df = df[df.swc_60cm != "."]
# df = df[df.swc_90cm != "."]
# df = df[df.swc_110cm != "."]
# df = df[df.swc_130cm != "."]
#
# # # Cut off extraneous dates we don't need...
# df_long = df[df.index > 15000] # 32000 <- use for plotting
# # df = df[df.index > 32000]
# # df_long = df_long[df_long.index < 15250]
#
# # testing to see why the datasets at each location are different lengths...
#
# testpath = "/Users/Gabe/Desktop/test_water_content_data.csv"
#
# df_long.to_csv(testpath)
#
# # dictionary that relates each pixel to the correct jornada stations.
# relate_dict = {"000": ["C01", "C02"], "001": ["C03", "C04", "C05", "C06", "C07", "C08", "C09"],
# "002": ["C10", "C11"], "003": ["C12", "C13", "C14", "C15", "C16", "C17", "C18", "C19", "C20", "C21"],
# "004": ["C22", "C23", "C24", "C25", "C26", "C27", "C28", "C29"],
# "005": ["C31", "C32", "C33", "C34", "C35", "C36", "C37", "C38", "C39"],
# "006": ["C40", "C41", "C42", "C43", "C44", "C45", "C46", "C47", "C48"],
# "007": ["C51", "C52", "C53", "C54", "C55", "C56", "C57"],
# "008": ["C58", "C59", "C60", "C61", "C62", "C63", "C64", "C65", "C66"],
# "009": ["C67", "C68", "C69", "C70"], "010": ["C71", "C72", "C73", "C74", "C75"],
# "011": ["C76", "C77", "C78", "C79", "C80", "C81", "C82", "C83", "C84"],
# "012": ["C85", "C86", "C87", "C88", "C89"]}
#
# # dictionary that relates each jornada station to the correct pixel.
# jornada_etrm = build_jornada_etrm()
#
# # loop through the jornada_etrm dictionary to get the proper Jornada data locations while tracking the ETRM pixel.
# avg_normal_dictionary = {}
# for key, value in jornada_etrm.iteritems():
# # print "key -> ", key
# # print "value -> ", value
#
# # ===== Jornada ========
#
# jornada_var = df[df['location'] == key]
#
# # a long version of the jornada dataset to get a more accurate min and max from the whole dataset to perform
# # the normalization with
# jornada_var_long = df_long[df_long['location'] == key]
#
# print "length jornada var long {}, key {}".format(len(jornada_var_long), key)
#
# # you only want to use the long jornada var...
# # print "Jornada VAR", jornada_var_long
#
# # ===== Depths (FIND ERROR) <- Don't normalize the data yet ========
#
# # 30 cm depth
# j_30 = float_data(jornada_var_long['swc_30cm'])
# print "len j_30", len(j_30)
# # convert to a float
# # j_30 = j_30.astype(np.float)
#
# # 60cm
# j_60 = float_data(jornada_var_long['swc_60cm'])
# print "len j_60", len(j_60)
# # j_60 = j_60.astype(np.float)
#
# # 90cm
# j_90 = float_data(jornada_var_long['swc_90cm'])
# print "len j_90", len(j_90)
# # print "here is j_90 -> {}".format(j_90)
# # j_90 = j_90.astype(np.float)
#
# # 110cm
# j_110 = float_data(jornada_var_long['swc_110cm'])
# print "len j_110", len(j_110)
# # j_110 = j_110.astype(np.float)
#
# # 130cm
# j_130 = float_data(jornada_var_long['swc_130cm'])
# print "len j_130", len(j_130)
# # j_130 = j_130.astype(np.float)
#
# # get the date...
# j_date = pd.to_datetime(jornada_var['date'])
#
# # print "THE DATE \n {}".format(j_date)
#
# # depth average
# j_avg = depth_average(j_30, j_60, j_90, j_110, j_130)
#
# # normalize
# jornada_avg_nrml = nrml_rzsm_for_error(j_avg)
#
# # print "length of the depth avg {}".format(len(jornada_avg_nrml))
#
# # add the normalized depth averaged value for each location into a new dictionary:
# avg_normal_dictionary[key] = jornada_avg_nrml
#
# avg_normal_dictionary['date'] = j_date
#
# # now we need to go through the relate dict to get the sigma, variability and average for each ETRM pixel
# # print "relate dict \n {}".format(relate_dict)
# # print "average_normal_dict \n {}".format(avg_normal_dictionary)
#
#
#
# depth_avg_time_series = {}
# for key, value in relate_dict.iteritems():
# if len(value) > 0:
# _loc_dict = {}
# for loc in value:
# # the depth averaged, normalized, time series for a given location
# avg_nrml = np.array(avg_normal_dictionary[loc])
#
# # add the time series to a dictionary
# _loc_dict[loc] = avg_nrml
#
# depth_avg_time_series[key] = _loc_dict
#
# # print "updated related statistics dictionary \n {}".format(depth_avg_time_series)
# #
# #
# # ### find the Standard error of the mean for each pixel
#
# std_error_d = {}
# # key_lst = []
# sum_lst = []
# for key, value in depth_avg_time_series.iteritems():
#
# # print "key", key
# #
# # print "value", value
#
# arr_lst = []
#
# for k, v in value.iteritems():
# if len(arr_lst) == 0:
# print "k first", k
# print 'v first', v
# print "first time!"
# arr_lst = v
# print "length first time {}".format(len(arr_lst))
# else:
# print "k", k
# print "v", v
# print "len v", len(v)
# prev_list = arr_lst
# arr_lst = v + prev_list
#
# print "summed list for key {} and list is \n {}".format(key, arr_lst)
# std_error_d[key] = arr_lst
#
# print "final sum dictionary {}".format(std_error_d)
#
#
#
#
# #
# # ### find the average of averages for each pixel
#
#
#
# # +=+=+=+=+=+=+=+=+=+=+=+= AGGREGATE and OUTPUT +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=
if __name__ == "__main__":
# # this will generate depth plots of ETRM and Jornada data and rzsm theta estimates
# run()
# # this outputs a text_file indicating the uncertainty of the RZSM for each pixel.
# find_error()
# # this will output plots of the relative storage for the Jornada tubes.
# storage_plot()
# this will output plots of the storage for the Jornada tubes at different storage levels
storage_plot_mod()
# # this outputs a csv of the running avg(depth_average) and standard error of the mean for each pixel.
# find_std_error() | apache-2.0 |
tamasgal/km3pipe | examples/plot_basic_analysis.py | 1 | 10138 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
======================
Basic Analysis Example
======================
"""
# Authors: Tamás Gál <[email protected]>, Moritz Lotze <[email protected]>
# License: BSD-3
# Date: 2017-10-10
# Status: Under construction...
#
# sphinx_gallery_thumbnail_number = 5
#####################################################
# Preparation
# -----------
# The very first thing we do is importing our libraries and setting up
# the Jupyter Notebook environment.
import matplotlib.pyplot as plt # our plotting module
import pandas as pd # the main HDF5 reader
import numpy as np # must have
import km3pipe as kp # some KM3NeT related helper functions
import seaborn as sns # beautiful statistical plots!
from km3net_testdata import data_path
#####################################################
# this is just to make our plots a bit "nicer", you can skip it
import km3pipe.style
km3pipe.style.use("km3pipe")
#####################################################
# Accessing the Data File(s)
# --------------------------
# In the following, we will work with one random simulation file with
# reconstruction information from JGandalf which has been converted
# from ROOT to HDF5 using the ``h5extract`` command line tool provided by
# ``KM3Pipe``.
#
# You can find the documentation here:
# https://km3py.pages.km3net.de/km3pipe/cmd.html#h5extract
#####################################################
# Note for Lyon Users
# ~~~~~~~~~~~~~~~~~~~
# If you are working on the Lyon cluster, you just need to load the
# Python module with ``module load python`` and you are all set.
#####################################################
# Converting from ROOT to HDF5 (if needed)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Choose a file (take e.g. one from /in2p3/km3net/mc/...),
# load the appropriate Jpp/Aanet version and convert it via::
#
# h5extract /path/to/a/reconstructed/file.root
#
# You can toggle a few options to include or exclude specific information.
# By default, everything will be extracted but you might want to skip
# Example the hit information. Have a look at ``h5extract -h``.
#
# You might also just pick some of the already converted files from
# HPSS/iRODS!
#####################################################
# First Look at the Data
# ----------------------
filepath = data_path("hdf5/basic_analysis_sample.h5")
#####################################################
# We can have a quick look at the file with the ``ptdump`` command
# in the terminal::
#
# ptdump filename.h5
#
# For further information, check out the documentation of the KM3NeT HDF5
# format definition: http://km3pipe.readthedocs.io/en/latest/hdf5.html
#
#####################################################
# The ``/event_info`` table contains general information about each event.
# The data is a simple 2D table and each event is represented by a single row.
#
# Let's have a look at the first few rows:
event_info = pd.read_hdf(filepath, "/event_info")
print(event_info.head(5))
#####################################################
# You can easily inspect the columns/fields of a ``Pandas.Dataframe`` with
# the ``.dtypes`` attribute:
print(event_info.dtypes)
#####################################################
# And access the data either by the property syntax (if it's a valid Python
# identifier) or the dictionary syntax, for example to access the neutrino
# weights:
print(event_info.weight_w2) # property syntax
print(event_info["weight_w2"]) # dictionary syntax
#####################################################
# Next, we will read out the MC tracks which are stored under ``/mc_tracks``.
tracks = pd.read_hdf(filepath, "/mc_tracks")
#####################################################
# It has a similar structure, but now you can have multiple rows which belong
# to an event. The ``event_id`` column holds the ID of the corresponding event.
print(tracks.head(10))
#####################################################
# We now are accessing the first track for each event by grouping via
# ``event_id`` and calling the ``first()`` method of the
# ``Pandas.DataFrame`` object.
primaries = tracks.groupby("event_id").first()
#####################################################
# Here are the first 5 primaries:
print(primaries.head(5))
#####################################################
# Creating some Fancy Graphs
# --------------------------
#####################################################
#
plt.hist(primaries.energy, bins=100, log=True)
plt.xlabel("energy [GeV]")
plt.ylabel("number of events")
plt.title("Energy Distribution")
#####################################################
#
primaries.bjorkeny.hist(bins=100)
plt.xlabel("bjorken-y")
plt.ylabel("number of events")
plt.title("bjorken-y Distribution")
#####################################################
#
zeniths = kp.math.zenith(primaries.filter(regex="^dir_.?$"))
primaries["zenith"] = zeniths
plt.hist(np.cos(primaries.zenith), bins=21, histtype="step", linewidth=2)
plt.xlabel(r"cos($\theta$)")
plt.ylabel("number of events")
plt.title("Zenith Distribution")
#####################################################
#
# Starting positions of primaries
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
plt.hist2d(primaries.pos_x, primaries.pos_y, bins=100, cmap="viridis")
plt.xlabel("x [m]")
plt.ylabel("y [m]")
plt.title("2D Plane")
plt.colorbar()
#####################################################
#
# If you have seaborn installed (`pip install seaborn`), you can easily create
# nice jointplots:
try:
import seaborn as sns # noqa
km3pipe.style.use("km3pipe") # reset matplotlib style
except:
print("No seaborn found, skipping example.")
else:
g = sns.jointplot("pos_x", "pos_y", data=primaries, kind="hex")
g.set_axis_labels("x [m]", "y[m]")
plt.subplots_adjust(right=0.90) # make room for the colorbar
plt.title("2D Plane")
plt.colorbar()
plt.legend()
#####################################################
#
from mpl_toolkits.mplot3d import Axes3D # noqa
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.scatter3D(primaries.pos_x, primaries.pos_y, primaries.pos_z, s=3)
ax.set_xlabel("x [m]", labelpad=10)
ax.set_ylabel("y [m]", labelpad=10)
ax.set_zlabel("z [m]", labelpad=10)
ax.set_title("3D Plane")
#####################################################
#
gandalfs = pd.read_hdf(filepath, "/reco/gandalf")
print(gandalfs.head(5))
#####################################################
#
gandalfs.columns
#####################################################
#
# plt.hist(gandalfs['lambda'], bins=50, log=True)
# plt.xlabel('lambda parameter')
# plt.ylabel('count')
# plt.title('Lambda Distribution of Reconstructed Events')
#####################################################
#
gandalfs["zenith"] = kp.math.zenith(gandalfs.filter(regex="^dir_.?$"))
plt.hist((gandalfs.zenith - primaries.zenith).dropna(), bins=100)
plt.xlabel(r"true zenith - reconstructed zenith [rad]")
plt.ylabel("count")
plt.title("Zenith Reconstruction Difference")
#####################################################
#
l = 0.2
lambda_cut = gandalfs["lambda"] < l
plt.hist((gandalfs.zenith - primaries.zenith)[lambda_cut].dropna(), bins=100)
plt.xlabel(r"true zenith - reconstructed zenith [rad]")
plt.ylabel("count")
plt.title("Zenith Reconstruction Difference for lambda < {}".format(l))
#####################################################
# Combined zenith reco plot for different lambda cuts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fig, ax = plt.subplots()
for l in [100, 5, 2, 1, 0.1]:
l_cut = gandalfs["lambda"] < l
ax.hist(
(primaries.zenith - gandalfs.zenith)[l_cut].dropna(),
bins=100,
label=r"$\lambda$ = {}".format(l),
alpha=0.7,
)
plt.xlabel(r"true zenith - reconstructed zenith [rad]")
plt.ylabel("count")
plt.legend()
plt.title("Zenith Reconstruction Difference for some Lambda Cuts")
#####################################################
# Fitting Angular resolutions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's fit some distributions: gaussian + lorentz (aka norm + cauchy)
#
# Fitting the gaussian to the whole range is a very bad fit, so
# we make a second gaussian fit only to +- 10 degree.
# Conversely, the Cauchy (lorentz) distribution is a near perfect fit
# (note that ``2 gamma = FWHM``).
from scipy.stats import cauchy, norm # noqa
residuals = gandalfs.zenith - primaries.zenith
cut = (gandalfs["lambda"] < l) & (np.abs(residuals) < 2 * np.pi)
residuals = residuals[cut]
event_info[cut]
# convert rad -> deg
residuals = residuals * 180 / np.pi
pi = 180
# x axis for plotting
x = np.linspace(-pi, pi, 1000)
c_loc, c_gamma = cauchy.fit(residuals)
fwhm = 2 * c_gamma
g_mu_bad, g_sigma_bad = norm.fit(residuals)
g_mu, g_sigma = norm.fit(residuals[np.abs(residuals) < 10])
plt.hist(residuals, bins="auto", label="Histogram", density=True, alpha=0.7)
plt.plot(
x,
cauchy(c_loc, c_gamma).pdf(x),
label="Lorentz: FWHM $=${:.3f}".format(fwhm),
linewidth=2,
)
plt.plot(
x,
norm(g_mu_bad, g_sigma_bad).pdf(x),
label="Unrestricted Gauss: $\sigma =$ {:.3f}".format(g_sigma_bad),
linewidth=2,
)
plt.plot(
x,
norm(g_mu, g_sigma).pdf(x),
label="+- 10 deg Gauss: $\sigma =$ {:.3f}".format(g_sigma),
linewidth=2,
)
plt.xlim(-pi / 4, pi / 4)
plt.xlabel("Zenith residuals / deg")
plt.legend()
####################################################################
# We can also look at the median resolution without doing any fits.
#
# In textbooks, this metric is also called Median Absolute Deviation.
resid_median = np.median(residuals)
residuals_shifted_by_median = residuals - resid_median
absolute_deviation = np.abs(residuals_shifted_by_median)
resid_mad = np.median(absolute_deviation)
plt.hist(np.abs(residuals), alpha=0.7, bins="auto", label="Absolute residuals")
plt.axvline(resid_mad, label="MAD: {:.2f}".format(resid_mad), linewidth=3)
plt.title("Average resolution: {:.3f} degree".format(resid_mad))
plt.legend()
plt.xlabel("Absolute zenith residuals / deg")
| mit |
LaurencePeanuts/Music | beatbox/ctu2015.py | 3 | 9335 | import numpy as np
import matplotlib.pylab as plt
import ipdb
plt.ion()
np.random.seed(3) # for reproducibility
r_cmb_mpc = 14.0
cmap = 'gray'
vscale = 15.
def demo():
"""
Short demo made at the "Compute the Universe 2015" Hack Week,
Berkeley. This was originally part of the "universe" class file
but has been extracted and tidied away here. The "Beatbox_Demo"
notebook should still work, though.
"""
# Generate fake CMB data on a Healpix sphere.
f = FakeHealpixData()
f.show()
# Define a 2d slice through our universe.
s = SliceSurface()
# Define an Inference object, then infer and visualize
# the minimum-variance phi field on the slice, given data
# on the sphere.
inf = Inference(f, s)
inf.calculate_mv_phi()
inf.view_phi_mv_slice()
# Make a bunch of realizations and analyze/visualize them.
'''
# RK: realizations have lower variance around the CMB ring (good), but
# have too-high variance in center of ring. I think it's an artifact
# of how I've defined the correlation/covariance function, namely as
# an integral that starts at k_min = 2*pi/(2*r_cmb). Not sure where
# to go from here.
slice_realizations = []
for i in range(20):
print i
this_slice_realization = inf.calculate_phi_realization()
slice_realizations.append(this_slice_realization)
slice_realizations = np.array(slice_realizations)
ipdb.set_trace()
'''
class CartesianCoordinates(object):
def __init__(self):
pass
def update_xyz(self):
self.xyz = np.vstack([self.x, self.y, self.z]).T
def make_distance_array(self, other_cart_coord):
#print '...making distance array...'
# Fast pairwise distances, see
# https://jakevdp.github.io/blog/2013/06/15/numba-vs-cython-take-2/
from scipy.spatial.distance import cdist
return cdist(self.xyz, other_cart_coord.xyz)
def make_auto_distance_array(self):
return self.make_distance_array(self)
class SliceSurface(CartesianCoordinates):
def __init__(self, position=0., side_mpc=30., reso_mpc=0.8):
self.side_mpc = side_mpc
self.reso_mpc = reso_mpc
n_side = int(np.ceil(side_mpc / reso_mpc))  # integer grid size for arange/reshape
self.n_side = n_side
x_2d = self.reso_mpc*np.tile(np.arange(n_side),n_side).reshape(n_side, n_side)
x_2d -= x_2d.mean()
y_2d = self.reso_mpc*np.tile(np.arange(n_side),n_side).reshape(n_side, n_side).T
y_2d -= y_2d.mean()
z_2d = self.reso_mpc*np.zeros_like(x_2d) + position
z_2d -= z_2d.mean()
self.n_side = n_side
self.x = x_2d.ravel()
self.y = y_2d.ravel()
self.z = z_2d.ravel()
self.update_xyz()
class HealpixSphericalSurface(CartesianCoordinates):
def __init__(self, radius_mpc=r_cmb_mpc, n_side=2**4):
# FYI: n_side = 2**4 corresponds to
# 0.064 radians resolution = ~0.9 Mpc at z~1000.
from healpy import nside2npix, pix2vec
self.n_pix = nside2npix(n_side)
x, y, z = pix2vec(n_side, range(self.n_pix))
self.radius_mpc = radius_mpc
self.x = self.radius_mpc*x
self.y = self.radius_mpc*y
self.z = self.radius_mpc*z
self.update_xyz()
class FakeHealpixData(HealpixSphericalSurface):
def __init__(self, sigma=1e-10):
HealpixSphericalSurface.__init__(self)
self.sigma = sigma
self.data = np.zeros(self.n_pix)
self.add_truth()
self.add_noise()
def add_truth(self):
#print '...adding truth...'
distance = self.make_auto_distance_array()
delta = distance[distance!=0].min()
cov = large_scale_phi_covariance(distance)
from numpy.random import multivariate_normal
self.data += multivariate_normal(np.zeros(self.n_pix), cov)
def add_noise(self):
#print '...adding noise...'
from numpy.random import randn
self.data += self.sigma*randn(self.n_pix)
pass
def show(self):
from healpy import mollview
mollview(self.data)#, cmap=cmap, min=-vscale, max=+vscale)
class HealpixPlusSlice(CartesianCoordinates):
def __init__(self):
healpix = HealpixSphericalSurface()
slice = SliceSurface()
self.n_healpix = len(healpix.x)
self.n_slice = len(slice.x)
self.n_total = self.n_healpix + self.n_slice
self.ind_healpix = range(0, self.n_healpix)
self.ind_slice = range(self.n_healpix, self.n_total)
self.x = np.hstack([healpix.x, slice.x])
self.y = np.hstack([healpix.y, slice.y])
self.z = np.hstack([healpix.z, slice.z])
self.update_xyz()
def large_scale_phi_covariance(distance):
# should be something like
# cov(r) ~ Int(dk * sin(k*r)/(k**2 * r) )
# see Equation 9.32 from Dodelson's Cosmology.
# The integral will diverge unless we put in this k_min.
k_min = 2.*np.pi / (2. * r_cmb_mpc) # hack
k_max = 2.*np.pi / (2. * 0.25) # hack
# Evaluate covariance on 1d grid.
k_vec = np.arange(k_min, k_max, k_min/4.)
d_vec = np.arange(0., 1.01*distance.max(), 0.1)
pk_phi = k_vec**(-3.)
kd_vec = k_vec * d_vec[:,np.newaxis]
from scipy.special import jv
cov_vec = np.sum(pk_phi / k_vec * k_vec**3. * jv(0, kd_vec), axis=1)
#plt.plot(d_vec, cov_vec)
# Now interpolate onto 2d grid.
from scipy import interpolate
f = interpolate.interp1d(d_vec, cov_vec)
cov = f(distance)
# Let's force the covariance to be unity along the diagonal.
# I.e. let's define the variance of each point to be 1.0.
#cov_diag = cov.diagonal().copy()
#cov /= np.sqrt(cov_diag)
#cov /= np.sqrt(cov_diag.T)
return cov
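# Added sketch (not part of the original demo): the comments inside
# large_scale_phi_covariance() quote the radial integral
#   cov(r) ~ Int dk sin(k*r) / (k**2 * r)
# (cf. Dodelson Eq. 9.32). The helper below is a hedged, illustrative 1d
# discretization of that integral for a single separation r, reusing the same
# ad-hoc k_min/k_max cutoffs as the function above. The k-grid spacing and the
# use of np.sinc are choices made here purely for illustration; the demo
# itself never calls this function.
def _radial_phi_covariance_sketch(r, n_k=400):
    k_min = 2.*np.pi / (2. * r_cmb_mpc)
    k_max = 2.*np.pi / (2. * 0.25)
    k = np.linspace(k_min, k_max, n_k)
    dk = k[1] - k[0]
    # np.sinc(x) = sin(pi*x)/(pi*x), so np.sinc(k*r/np.pi) = sin(k*r)/(k*r);
    # dividing once more by k gives the integrand sin(k*r)/(k**2 * r).
    return np.sum(dk * np.sinc(k * r / np.pi) / k)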
class Inference(object):
def __init__(self, data_object, test_object):
# DATA_OBJECT is e.g. a FakeHealpixData object.
# It's where you have data.
# TEST_OBJECT is e.g. a SliceSurface object.
# It's where you want to make inferences.
self.data = data_object
self.test = test_object
def calculate_phi_realization(self):
###############################################################
# Coded up from Equation 18 in Roland's note,
# https://www.dropbox.com/s/hsq44r7cs1rwkuq/MusicofSphere.pdf
# Is there a faster algorithm than this?
###############################################################
# Ryan's understanding of this:
# Define a coordinate object that includes points on the sphere
# and on a 2d slice.
joint = HealpixPlusSlice()
# Do some preparatory work.
# We only do this once when making multiple realizations.
if not(hasattr(self, 'cov_joint')):
dist = joint.make_auto_distance_array()
cov_joint = large_scale_phi_covariance(dist)
self.cov_joint = cov_joint
if not(hasattr(self, 'phi_mv')):
self.calculate_mv_phi()
# Generate noise-free truth *simultaneously* on Sphere and Slice.
from numpy.random import multivariate_normal
realization_truth = multivariate_normal(np.zeros(joint.n_total), self.cov_joint)
sphere_truth = realization_truth[joint.ind_healpix]
slice_truth = realization_truth[joint.ind_slice]
# Add noise to Sphere points.
noise = self.data.sigma*np.random.randn(joint.n_healpix)
sphere_data = sphere_truth + noise
# Generate MV estimate on Slice.
tmp = np.dot(self.inv_cov_data_data , sphere_data)
this_phi_mv_slice = np.dot(self.cov_data_test.T, tmp)
# Get the difference of the MV estimate on Slice and the truth on Slice.
diff_mv = this_phi_mv_slice - slice_truth
# Add that difference to your *original* MV estimate on Slice.
# Now you have a sample/realization of the posterior on the Slice, given original data.
this_realization = self.phi_mv + diff_mv
return this_realization
def calculate_mv_phi(self):
self.get_data_data_covariance()
self.get_data_test_covariance()
tmp = np.dot(self.inv_cov_data_data , self.data.data)
self.phi_mv = np.dot(self.cov_data_test.T, tmp)
def get_data_data_covariance(self):
# Get phi covariance between data space and data space.
from numpy.linalg import inv
dist_data_data = self.data.make_auto_distance_array()
cov_data_data = large_scale_phi_covariance(dist_data_data)
self.inv_cov_data_data = inv(cov_data_data)
def get_data_test_covariance(self):
# Get phi covariance between data space and test space.
dist_data_test = self.data.make_distance_array(self.test)
cov_data_test = large_scale_phi_covariance(dist_data_test)
self.cov_data_test = cov_data_test
def view_phi_mv_slice(self):
self.view_slice(self.phi_mv)
def view_slice(self, slice_1d):
slice_2d = slice_1d.reshape(self.test.n_side, self.test.n_side)
plt.figure(figsize=(7,7))
plt.imshow(slice_2d, cmap=cmap, vmin=-vscale, vmax=+vscale)
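# Added note (a hedged reading of the class above, not original commentary):
# Inference.calculate_mv_phi() implements a minimum-variance (Wiener-filter)
# interpolation of the sphere data onto the slice,
#   phi_mv = C_{test,data} . C_{data,data}^{-1} . d,
# with both covariance blocks built from large_scale_phi_covariance(), while
# calculate_phi_realization() draws constrained realizations around that
# estimate (Eq. 18 of the note cited in the method).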
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/indexes/datetimes/test_date_range.py | 6 | 22004 | """
test date_range, bdate_range, cdate_range
construction from the convenience range functions
"""
import pytest
import numpy as np
from datetime import datetime, timedelta, time
import pandas as pd
import pandas.util.testing as tm
from pandas import compat
from pandas.core.indexes.datetimes import bdate_range, cdate_range
from pandas import date_range, offsets, DatetimeIndex, Timestamp
from pandas.tseries.offsets import (generate_range, CDay, BDay,
DateOffset, MonthEnd)
from pandas.tests.series.common import TestData
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
def eq_gen_range(kwargs, expected):
rng = generate_range(**kwargs)
assert (np.array_equal(list(rng), expected))
class TestDateRanges(TestData):
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
assert len(rng) == 4
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range('2011-12-31', freq='-2A', periods=3)
exp = pd.DatetimeIndex(['2011-12-31', '2009-12-31',
'2007-12-31'], freq='-2A')
tm.assert_index_equal(rng, exp)
assert rng.freq == '-2A'
rng = date_range('2011-01-31', freq='-2M', periods=3)
exp = pd.DatetimeIndex(['2011-01-31', '2010-11-30',
'2010-09-30'], freq='-2M')
tm.assert_index_equal(rng, exp)
assert rng.freq == '-2M'
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)])
tm.assert_index_equal(rng, values)
rng = date_range('1/1/2000 08:15', periods=n, normalize=False,
freq='B')
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(start="2013-01-01", periods=2, freq=offsets.FY5253(
startingMonth=1, weekday=3, variation="nearest"))
assert dr[0] == Timestamp('2013-01-31')
assert dr[1] == Timestamp('2014-01-30')
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
pytest.raises(ValueError, date_range, start, end, freq='s',
periods=10)
def test_date_range_businesshour(self):
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00',
'2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00',
'2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00'],
freq='BH')
rng = date_range('2014-07-04 09:00', '2014-07-04 16:00', freq='BH')
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
['2014-07-04 16:00', '2014-07-07 09:00'], freq='BH')
rng = date_range('2014-07-04 16:00', '2014-07-07 09:00', freq='BH')
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00',
'2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00',
'2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00',
'2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00',
'2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00',
'2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00',
'2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'],
freq='BH')
rng = date_range('2014-07-04 09:00', '2014-07-08 16:00', freq='BH')
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
pytest.raises(ValueError, date_range, '1/1/2000')
pytest.raises(ValueError, date_range, end='1/1/2000')
pytest.raises(ValueError, date_range, periods=10)
pytest.raises(ValueError, date_range, '1/1/2000', freq='H')
pytest.raises(ValueError, date_range, end='1/1/2000', freq='H')
pytest.raises(ValueError, date_range, periods=10, freq='H')
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
for f in [compat.long, int]:
result = date_range(Timestamp('1960-04-01 00:00:00',
freq='QS-JAN'),
periods=f(76),
freq='QS-JAN')
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
pytest.raises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
class TestGenRangeGeneration(object):
def test_generate(self):
rng1 = list(generate_range(START, END, offset=BDay()))
rng2 = list(generate_range(START, END, time_rule='B'))
assert rng1 == rng2
def test_generate_cday(self):
rng1 = list(generate_range(START, END, offset=CDay()))
rng2 = list(generate_range(START, END, time_rule='C'))
assert rng1 == rng2
def test_1(self):
eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2),
[datetime(2009, 3, 25), datetime(2009, 3, 26)])
def test_2(self):
eq_gen_range(dict(start=datetime(2008, 1, 1),
end=datetime(2008, 1, 3)),
[datetime(2008, 1, 1),
datetime(2008, 1, 2),
datetime(2008, 1, 3)])
def test_3(self):
eq_gen_range(dict(start=datetime(2008, 1, 5),
end=datetime(2008, 1, 6)),
[])
def test_precision_finer_than_offset(self):
# GH 9907
result1 = DatetimeIndex(start='2015-04-15 00:00:03',
end='2016-04-22 00:00:00', freq='Q')
result2 = DatetimeIndex(start='2015-04-15 00:00:03',
end='2015-06-22 00:00:04', freq='W')
expected1_list = ['2015-06-30 00:00:03', '2015-09-30 00:00:03',
'2015-12-31 00:00:03', '2016-03-31 00:00:03']
expected2_list = ['2015-04-19 00:00:03', '2015-04-26 00:00:03',
'2015-05-03 00:00:03', '2015-05-10 00:00:03',
'2015-05-17 00:00:03', '2015-05-24 00:00:03',
'2015-05-31 00:00:03', '2015-06-07 00:00:03',
'2015-06-14 00:00:03', '2015-06-21 00:00:03']
expected1 = DatetimeIndex(expected1_list, dtype='datetime64[ns]',
freq='Q-DEC', tz=None)
expected2 = DatetimeIndex(expected2_list, dtype='datetime64[ns]',
freq='W-SUN', tz=None)
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
class TestBusinessDateRange(object):
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_constructor(self):
bdate_range(START, END, freq=BDay())
bdate_range(START, periods=20, freq=BDay())
bdate_range(end=START, periods=20, freq=BDay())
pytest.raises(ValueError, date_range, '2011-1-1', '2012-1-1', 'B')
pytest.raises(ValueError, bdate_range, '2011-1-1', '2012-1-1', 'B')
def test_naive_aware_conflicts(self):
naive = bdate_range(START, END, freq=BDay(), tz=None)
aware = bdate_range(START, END, freq=BDay(),
tz="Asia/Hong_Kong")
tm.assert_raises_regex(TypeError, "tz-naive.*tz-aware",
naive.join, aware)
tm.assert_raises_regex(TypeError, "tz-naive.*tz-aware",
aware.join, naive)
def test_cached_range(self):
DatetimeIndex._cached_range(START, END, offset=BDay())
DatetimeIndex._cached_range(START, periods=20, offset=BDay())
DatetimeIndex._cached_range(end=START, periods=20, offset=BDay())
tm.assert_raises_regex(TypeError, "offset",
DatetimeIndex._cached_range,
START, END)
tm.assert_raises_regex(TypeError, "specify period",
DatetimeIndex._cached_range, START,
offset=BDay())
tm.assert_raises_regex(TypeError, "specify period",
DatetimeIndex._cached_range, end=END,
offset=BDay())
tm.assert_raises_regex(TypeError, "start or end",
DatetimeIndex._cached_range, periods=20,
offset=BDay())
def test_cached_range_bug(self):
rng = date_range('2010-09-01 05:00:00', periods=50,
freq=DateOffset(hours=6))
assert len(rng) == 50
assert rng[0] == datetime(2010, 9, 1, 5)
def test_timezone_comparaison_bug(self):
# smoke test
start = Timestamp('20130220 10:00', tz='US/Eastern')
result = date_range(start, periods=2, tz='US/Eastern')
assert len(result) == 2
def test_timezone_comparaison_assert(self):
start = Timestamp('20130220 10:00', tz='US/Eastern')
pytest.raises(AssertionError, date_range, start, periods=2,
tz='Europe/Berlin')
def test_misc(self):
end = datetime(2009, 5, 13)
dr = bdate_range(end=end, periods=20)
firstDate = end - 19 * BDay()
assert len(dr) == 20
assert dr[0] == firstDate
assert dr[-1] == end
def test_date_parse_failure(self):
badly_formed_date = '2007/100/1'
pytest.raises(ValueError, Timestamp, badly_formed_date)
pytest.raises(ValueError, bdate_range, start=badly_formed_date,
periods=10)
pytest.raises(ValueError, bdate_range, end=badly_formed_date,
periods=10)
pytest.raises(ValueError, bdate_range, badly_formed_date,
badly_formed_date)
def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range('12/5/2011', '12/5/2011')
rng2 = bdate_range('12/2/2011', '12/5/2011')
rng2.offset = BDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
def test_error_with_zero_monthends(self):
pytest.raises(ValueError, date_range, '1/1/2000', '1/1/2001',
freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
exp_values = [start + i * offset for i in range(5)]
tm.assert_index_equal(result, DatetimeIndex(exp_values))
def test_range_tz_pytz(self):
# GH 2906
tm._skip_if_no_pytz()
from pytz import timezone
tz = timezone('US/Eastern')
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
def test_range_tz_dst_straddle_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone
tz = timezone('US/Eastern')
dates = [(tz.localize(datetime(2014, 3, 6)),
tz.localize(datetime(2014, 3, 12))),
(tz.localize(datetime(2013, 11, 1)),
tz.localize(datetime(2013, 11, 6)))]
for (start, end) in dates:
dr = date_range(start, end, freq='D')
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start, end, freq='D', tz='US/Eastern')
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start.replace(tzinfo=None), end.replace(
tzinfo=None), freq='D', tz='US/Eastern')
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
def test_range_tz_dateutil(self):
# GH 2906
tm._skip_if_no_dateutil()
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas._libs.tslib import maybe_get_tz
tz = lambda x: maybe_get_tz('dateutil/' + x)
start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern'))
end = datetime(2011, 1, 3, tzinfo=tz('US/Eastern'))
dr = date_range(start=start, periods=3)
assert dr.tz == tz('US/Eastern')
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz == tz('US/Eastern')
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz == tz('US/Eastern')
assert dr[0] == start
assert dr[2] == end
def test_range_closed(self):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_with_tz_aware_start_end(self):
# GH12409, GH12684
begin = Timestamp('2011/1/1', tz='US/Eastern')
end = Timestamp('2014/1/1', tz='US/Eastern')
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
begin = Timestamp('2011/1/1')
end = Timestamp('2014/1/1')
begintz = Timestamp('2011/1/1', tz='US/Eastern')
endtz = Timestamp('2014/1/1', tz='US/Eastern')
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq,
tz='US/Eastern')
left = date_range(begin, end, closed="left", freq=freq,
tz='US/Eastern')
right = date_range(begin, end, closed="right", freq=freq,
tz='US/Eastern')
expected_left = left
expected_right = right
if endtz == closed[-1]:
expected_left = closed[:-1]
if begintz == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_boundary(self):
# GH 11804
for closed in ['right', 'left', None]:
right_boundary = date_range('2015-09-12', '2015-12-01',
freq='QS-MAR', closed=closed)
left_boundary = date_range('2015-09-01', '2015-09-12',
freq='QS-MAR', closed=closed)
both_boundary = date_range('2015-09-01', '2015-12-01',
freq='QS-MAR', closed=closed)
expected_right = expected_left = expected_both = both_boundary
if closed == 'right':
expected_left = both_boundary[1:]
if closed == 'left':
expected_right = both_boundary[:-1]
if closed is None:
expected_right = both_boundary[1:]
expected_left = both_boundary[:-1]
tm.assert_index_equal(right_boundary, expected_right)
tm.assert_index_equal(left_boundary, expected_left)
tm.assert_index_equal(both_boundary, expected_both)
def test_years_only(self):
# GH 6961
dr = date_range('2014', '2015', freq='M')
assert dr[0] == datetime(2014, 1, 31)
assert dr[-1] == datetime(2014, 12, 31)
def test_freq_divides_end_in_nanos(self):
# GH 10885
result_1 = date_range('2005-01-12 10:00', '2005-01-12 16:00',
freq='345min')
result_2 = date_range('2005-01-13 10:00', '2005-01-13 16:00',
freq='345min')
expected_1 = DatetimeIndex(['2005-01-12 10:00:00',
'2005-01-12 15:45:00'],
dtype='datetime64[ns]', freq='345T',
tz=None)
expected_2 = DatetimeIndex(['2005-01-13 10:00:00',
'2005-01-13 15:45:00'],
dtype='datetime64[ns]', freq='345T',
tz=None)
tm.assert_index_equal(result_1, expected_1)
tm.assert_index_equal(result_2, expected_2)
class TestCustomDateRange(object):
def setup_method(self, method):
self.rng = cdate_range(START, END)
def test_constructor(self):
cdate_range(START, END, freq=CDay())
cdate_range(START, periods=20, freq=CDay())
cdate_range(end=START, periods=20, freq=CDay())
pytest.raises(ValueError, date_range, '2011-1-1', '2012-1-1', 'C')
pytest.raises(ValueError, cdate_range, '2011-1-1', '2012-1-1', 'C')
def test_cached_range(self):
DatetimeIndex._cached_range(START, END, offset=CDay())
DatetimeIndex._cached_range(START, periods=20,
offset=CDay())
DatetimeIndex._cached_range(end=START, periods=20,
offset=CDay())
pytest.raises(Exception, DatetimeIndex._cached_range, START, END)
pytest.raises(Exception, DatetimeIndex._cached_range, START,
freq=CDay())
pytest.raises(Exception, DatetimeIndex._cached_range, end=END,
freq=CDay())
pytest.raises(Exception, DatetimeIndex._cached_range, periods=20,
freq=CDay())
def test_misc(self):
end = datetime(2009, 5, 13)
dr = cdate_range(end=end, periods=20)
firstDate = end - 19 * CDay()
assert len(dr) == 20
assert dr[0] == firstDate
assert dr[-1] == end
def test_date_parse_failure(self):
badly_formed_date = '2007/100/1'
pytest.raises(ValueError, Timestamp, badly_formed_date)
pytest.raises(ValueError, cdate_range, start=badly_formed_date,
periods=10)
pytest.raises(ValueError, cdate_range, end=badly_formed_date,
periods=10)
pytest.raises(ValueError, cdate_range, badly_formed_date,
badly_formed_date)
def test_daterange_bug_456(self):
# GH #456
rng1 = cdate_range('12/5/2011', '12/5/2011')
rng2 = cdate_range('12/2/2011', '12/5/2011')
rng2.offset = CDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
def test_cdaterange(self):
rng = cdate_range('2013-05-01', periods=3)
xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-03'])
tm.assert_index_equal(xp, rng)
def test_cdaterange_weekmask(self):
rng = cdate_range('2013-05-01', periods=3,
weekmask='Sun Mon Tue Wed Thu')
xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-05'])
tm.assert_index_equal(xp, rng)
def test_cdaterange_holidays(self):
rng = cdate_range('2013-05-01', periods=3, holidays=['2013-05-01'])
xp = DatetimeIndex(['2013-05-02', '2013-05-03', '2013-05-06'])
tm.assert_index_equal(xp, rng)
def test_cdaterange_weekmask_and_holidays(self):
rng = cdate_range('2013-05-01', periods=3,
weekmask='Sun Mon Tue Wed Thu',
holidays=['2013-05-01'])
xp = DatetimeIndex(['2013-05-02', '2013-05-05', '2013-05-06'])
tm.assert_index_equal(xp, rng)
| mit |
mfherbst/spack | var/spack/repos/builtin/packages/py-abipy/package.py | 5 | 3487 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyAbipy(PythonPackage):
"""Python package to automate ABINIT calculations and analyze
the results."""
homepage = "https://github.com/abinit/abipy"
url = "https://pypi.io/packages/source/a/abipy/abipy-0.2.0.tar.gz"
version('0.2.0', 'af9bc5cf7d5ca1a56ff73e2a65c5bcbd')
variant('gui', default=False, description='Build the GUI')
variant('ipython', default=False, description='Build IPython support')
extends('python', ignore='bin/(feff_.*|gaussian_analyzer|get_environment|html2text|nc3tonc4|nc4tonc3|ncinfo|pmg|pydii|tabulate|tqdm)')
depends_on('[email protected]:')
depends_on('py-setuptools', type='build')
depends_on('py-cython', type='build')
depends_on('py-six', type=('build', 'run'))
depends_on('py-prettytable', type=('build', 'run'))
depends_on('py-tabulate', type=('build', 'run'))
depends_on('[email protected]', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-html2text', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-spglib', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-netcdf4', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-seaborn', type=('build', 'run'))
depends_on('py-wxpython', type=('build', 'run'), when='+gui')
depends_on('py-wxmplot', type=('build', 'run'), when='+gui')
depends_on('py-ipython', type=('build', 'run'), when='+ipython')
depends_on('py-jupyter', type=('build', 'run'), when='+ipython')
depends_on('py-nbformat', type=('build', 'run'), when='+ipython')
def build_args(self, spec, prefix):
args = []
if '+ipython' in spec:
args.append('--with-ipython')
return args
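    # Illustrative usage of the variants declared above (comment added for
    # clarity; not part of the original package recipe):
    #   spack install py-abipy            # core build
    #   spack install py-abipy+gui        # pulls in wxpython/wxmplot
    #   spack install py-abipy+ipython    # pulls in ipython/jupyter/nbformat
    #                                     # and passes --with-ipython via
    #                                     # build_args() above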
| lgpl-2.1 |
mxjl620/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
typically return probabilities closer to 0 or 1.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/tests/test_patches.py | 2 | 10593 | """
Tests specific to the patches module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from matplotlib.patches import Polygon
from matplotlib.patches import Rectangle
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.collections as mcollections
from matplotlib import path as mpath
from matplotlib import transforms as mtrans
import matplotlib.style as mstyle
def test_Polygon_close():
#: Github issue #1018 identified a bug in the Polygon handling
#: of the closed attribute; the path was not getting closed
#: when set_xy was used to set the vertices.
# open set of vertices:
xy = [[0, 0], [0, 1], [1, 1]]
# closed set:
xyclosed = xy + [[0, 0]]
# start with open path and close it:
p = Polygon(xy, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xyclosed)
# start with closed path and open it:
p = Polygon(xyclosed, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xy)
# start with open path and leave it open:
p = Polygon(xy, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xy)
# start with closed path and leave it closed:
p = Polygon(xyclosed, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xyclosed)
def test_rotate_rect():
loc = np.asarray([1.0, 2.0])
width = 2
height = 3
angle = 30.0
# A rotated rectangle
rect1 = Rectangle(loc, width, height, angle=angle)
# A non-rotated rectangle
rect2 = Rectangle(loc, width, height)
# Set up an explicit rotation matrix (in radians)
angle_rad = np.pi * angle / 180.0
rotation_matrix = np.array([[np.cos(angle_rad), -np.sin(angle_rad)],
[np.sin(angle_rad), np.cos(angle_rad)]])
# Translate to origin, rotate each vertex, and then translate back
new_verts = np.inner(rotation_matrix, rect2.get_verts() - loc).T + loc
# They should be the same
assert_almost_equal(rect1.get_verts(), new_verts)
def test_negative_rect():
# These two rectangles have the same vertices, but starting from a
# different point. (We also drop the last vertex, which is a duplicate.)
pos_vertices = Rectangle((-3, -2), 3, 2).get_verts()[:-1]
neg_vertices = Rectangle((0, 0), -3, -2).get_verts()[:-1]
assert_array_equal(np.roll(neg_vertices, 2, 0), pos_vertices)
@image_comparison(baseline_images=['clip_to_bbox'])
def test_clip_to_bbox():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-18, 20])
ax.set_ylim([-150, 100])
path = mpath.Path.unit_regular_star(8).deepcopy()
path.vertices *= [10, 100]
path.vertices -= [5, 25]
path2 = mpath.Path.unit_circle().deepcopy()
path2.vertices *= [10, 100]
path2.vertices += [10, -25]
combined = mpath.Path.make_compound_path(path, path2)
patch = mpatches.PathPatch(
combined, alpha=0.5, facecolor='coral', edgecolor='none')
ax.add_patch(patch)
bbox = mtrans.Bbox([[-12, -77.5], [50, -110]])
result_path = combined.clip_to_bbox(bbox)
result_patch = mpatches.PathPatch(
result_path, alpha=0.5, facecolor='green', lw=4, edgecolor='black')
ax.add_patch(result_patch)
@image_comparison(baseline_images=['patch_alpha_coloring'], remove_text=True)
def test_patch_alpha_coloring():
"""
Test checks that the patch and collection are rendered with the specified
alpha values in their facecolor and edgecolor.
"""
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_alpha_override'], remove_text=True)
def test_patch_alpha_override():
#: Test checks that specifying an alpha attribute for a patch or
#: collection will override any alpha component of the facecolor
#: or edgecolor.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@cleanup(style='default')
def test_patch_color_none():
# Make sure the alpha kwarg does not override 'none' facecolor.
# Addresses issue #7478.
c = plt.Circle((0, 0), 1, facecolor='none', alpha=1)
assert c.get_facecolor()[0] == 0
@image_comparison(baseline_images=['patch_custom_linestyle'],
remove_text=True)
def test_patch_custom_linestyle():
#: A test to check that patches and collections accept custom dash
#: patterns as linestyle and that they display correctly.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle=(0.0, (5.0, 7.0, 10.0, 7.0)),
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles=[(0.0, (5.0, 7.0, 10.0, 7.0))],
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@cleanup
def test_patch_linestyle_accents():
    #: Test if linestyle can also be specified with short mnemonics
    #: like "--"
    #: cf. Github issue #2136
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
linestyles = ["-", "--", "-.", ":",
"solid", "dashed", "dashdot", "dotted"]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i, ls in enumerate(linestyles):
star = mpath.Path(verts + i, codes)
patch = mpatches.PathPatch(star,
linewidth=3, linestyle=ls,
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_patch(patch)
ax.set_xlim([-1, i + 1])
ax.set_ylim([-1, i + 1])
fig.canvas.draw()
assert True
def test_wedge_movement():
param_dict = {'center': ((0, 0), (1, 1), 'set_center'),
'r': (5, 8, 'set_radius'),
'width': (2, 3, 'set_width'),
'theta1': (0, 30, 'set_theta1'),
'theta2': (45, 50, 'set_theta2')}
init_args = dict((k, v[0]) for (k, v) in six.iteritems(param_dict))
w = mpatches.Wedge(**init_args)
for attr, (old_v, new_v, func) in six.iteritems(param_dict):
assert_equal(getattr(w, attr), old_v)
getattr(w, func)(new_v)
assert_equal(getattr(w, attr), new_v)
@image_comparison(baseline_images=['wedge_range'],
remove_text=True)
def test_wedge_range():
ax = plt.axes()
t1 = 2.313869244286224
args = [[52.31386924, 232.31386924],
[52.313869244286224, 232.31386924428622],
[t1, t1 + 180.0],
[0, 360],
[90, 90 + 360],
[-180, 180],
[0, 380],
[45, 46],
[46, 45]]
for i, (theta1, theta2) in enumerate(args):
x = i % 3
y = i // 3
wedge = mpatches.Wedge((x * 3, y * 3), 1, theta1, theta2,
facecolor='none', edgecolor='k', lw=3)
ax.add_artist(wedge)
ax.set_xlim([-2, 8])
ax.set_ylim([-2, 9])
@image_comparison(baseline_images=['multi_color_hatch'],
remove_text=True, style='default')
def test_multi_color_hatch():
fig, ax = plt.subplots()
rects = ax.bar(range(5), range(1, 6))
for i, rect in enumerate(rects):
rect.set_facecolor('none')
rect.set_edgecolor('C{}'.format(i))
rect.set_hatch('/')
for i in range(5):
with mstyle.context({'hatch.color': 'C{}'.format(i)}):
r = Rectangle((i-.8/2, 5), .8, 1, hatch='//', fc='none')
ax.add_patch(r)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| bsd-2-clause |
mnwhite/HARK | ConsumptionSaving/ConsPersistentShockModel.py | 1 | 62424 | '''
Classes to solve consumption-saving models with idiosyncratic shocks to income
in which shocks are not necessarily fully transitory or fully permanent. Extends
ConsIndShockModel by explicitly tracking permanent income as a state variable,
and allows (log) permanent income to follow an AR1 process rather than random walk.
'''
import sys
import os
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
from copy import copy, deepcopy
import numpy as np
from HARKcore import HARKobject
from HARKutilities import warnings # Because of "patch" to warnings modules
from HARKinterpolation import LowerEnvelope2D, BilinearInterp, Curvilinear2DInterp,\
LinearInterpOnInterp1D, LinearInterp, CubicInterp, VariableLowerBoundFunc2D
from HARKutilities import CRRAutility, CRRAutilityP, CRRAutilityPP, CRRAutilityP_inv,\
CRRAutility_invP, CRRAutility_inv, CRRAutilityP_invP,\
approxLognormal
from HARKsimulation import drawBernoulli, drawLognormal
from ConsIndShockModel import ConsIndShockSetup, ConsumerSolution, IndShockConsumerType
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
utilityP_invP = CRRAutilityP_invP
class ValueFunc2D(HARKobject):
'''
A class for representing a value function in a model where permanent income
is explicitly included as a state variable. The underlying interpolation is
in the space of (m,p) --> u_inv(v); this class "re-curves" to the value function.
'''
distance_criteria = ['func','CRRA']
def __init__(self,vFuncNvrs,CRRA):
'''
Constructor for a new value function object.
Parameters
----------
vFuncNvrs : function
A real function representing the value function composed with the
inverse utility function, defined on market resources and permanent
income: u_inv(vFunc(m,p))
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.func = deepcopy(vFuncNvrs)
self.CRRA = CRRA
def __call__(self,m,p):
'''
Evaluate the value function at given levels of market resources m and
permanent income p.
Parameters
----------
m : float or np.array
            Market resources whose value is to be calculated.
p : float or np.array
Permanent income levels whose value is to be calculated.
Returns
-------
v : float or np.array
Lifetime value of beginning this period with market resources m and
permanent income p; has same size as inputs m and p.
'''
return utility(self.func(m,p),gam=self.CRRA)
class MargValueFunc2D(HARKobject):
'''
A class for representing a marginal value function in models where the
standard envelope condition of v'(m,p) = u'(c(m,p)) holds (with CRRA utility).
This is copied from ConsAggShockModel, with the second state variable re-
labeled as permanent income p.
'''
distance_criteria = ['cFunc','CRRA']
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources and the level of permanent income: uP_inv(vPfunc(m,p)).
Called cFunc because when standard envelope condition applies,
uP_inv(vPfunc(m,p)) = cFunc(m,p).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m,p):
'''
Evaluate the marginal value function at given levels of market resources
m and permanent income p.
Parameters
----------
m : float or np.array
            Market resources whose value is to be calculated.
p : float or np.array
Permanent income levels whose value is to be calculated.
Returns
-------
vP : float or np.array
Marginal value of market resources when beginning this period with
market resources m and permanent income p; has same size as inputs
m and p.
'''
return utilityP(self.cFunc(m,p),gam=self.CRRA)
def derivativeX(self,m,p):
'''
Evaluate the first derivative with respect to market resources of the
marginal value function at given levels of market resources m and per-
manent income p.
Parameters
----------
m : float or np.array
            Market resources whose value is to be calculated.
p : float or np.array
Permanent income levels whose value is to be calculated.
Returns
-------
vPP : float or np.array
Marginal marginal value of market resources when beginning this period
with market resources m and permanent income p; has same size as inputs
m and p.
'''
c = self.cFunc(m,p)
MPC = self.cFunc.derivativeX(m,p)
return MPC*utilityPP(c,gam=self.CRRA)
class MargMargValueFunc2D(HARKobject):
'''
A class for representing a marginal marginal value function in models where the
standard envelope condition of v'(m,p) = u'(c(m,p)) holds (with CRRA utility).
'''
distance_criteria = ['cFunc','CRRA']
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources and the level of permanent income: uP_inv(vPfunc(m,p)).
Called cFunc because when standard envelope condition applies,
            uP_inv(vPfunc(m,p)) = cFunc(m,p).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m,p):
'''
Evaluate the marginal marginal value function at given levels of market
resources m and permanent income p.
Parameters
----------
m : float or np.array
Market resources whose marginal marginal value is to be calculated.
p : float or np.array
Permanent income levels whose marginal marginal value is to be calculated.
Returns
-------
vPP : float or np.array
Marginal marginal value of beginning this period with market
resources m and permanent income p; has same size as inputs.
'''
c = self.cFunc(m,p)
MPC = self.cFunc.derivativeX(m,p)
return MPC*utilityPP(c,gam=self.CRRA)
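# Summary of the CRRA identities used by the three wrapper classes above
# (comment added for clarity; a hedged reading of the code, not original HARK
# documentation):
#   v(m,p)   = u(vNvrs(m,p))  with  u(c) = c**(1-CRRA)/(1-CRRA)
#   v'(m,p)  = u'(c(m,p)) = c(m,p)**(-CRRA)        (standard envelope condition)
#   v''(m,p) = MPC(m,p)*u''(c(m,p)),  MPC(m,p) = cFunc.derivativeX(m,p)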
###############################################################################
class ConsIndShockSolverExplicitPermInc(ConsIndShockSetup):
'''
A class for solving the same one period "idiosyncratic shocks" problem as
ConsIndShock, but with permanent income explicitly tracked as a state variable.
'''
def __init__(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool):
'''
Constructor for a new solver for a one period problem with idiosyncratic
shocks to permanent and transitory income, with permanent income tracked
as a state variable rather than normalized out.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
        PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with.
aXtraGrid: np.array
Array of "extra" end-of-period (normalized) asset values-- assets
above the absolute minimum acceptable level.
pLvlGrid: np.array
Array of permanent income levels at which to solve the problem.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
None
'''
self.assignParameters(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool)
self.defUtilityFuncs()
def assignParameters(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool):
'''
Assigns period parameters as attributes of self for use by other methods
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
        PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with.
aXtraGrid: np.array
Array of "extra" end-of-period (normalized) asset values-- assets
above the absolute minimum acceptable level.
pLvlGrid: np.array
Array of permanent income levels at which to solve the problem.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
none
'''
ConsIndShockSetup.assignParameters(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
self.pLvlGrid = pLvlGrid
def setAndUpdateValues(self,solution_next,IncomeDstn,LivPrb,DiscFac):
'''
Unpacks some of the inputs (and calculates simple objects based on them),
storing the results in self for use by other methods. These include:
income shocks and probabilities, next period's marginal value function
(etc), the probability of getting the worst income shock next period,
the patience factor, human wealth, and the bounding MPCs. Human wealth
is stored as a function of permanent income.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
Returns
-------
None
'''
# Run basic version of this method
ConsIndShockSetup.setAndUpdateValues(self,solution_next,IncomeDstn,LivPrb,DiscFac)
# Replace normalized human wealth (scalar) with human wealth level as function of permanent income
self.hNrmNow = None
if hasattr(self,'PermIncCorr'): # This prevents needing to make a whole new method
            Corr = self.PermIncCorr # just for persistent shocks, due to the pLvlGrid**Corr below
else:
Corr = 1.0
pLvlCount = self.pLvlGrid.size
IncShkCount = self.PermShkValsNext.size
        PermIncNext = (np.tile(self.pLvlGrid**Corr,(IncShkCount,1)) *
                       np.tile(self.PermShkValsNext,(pLvlCount,1)).transpose())
        hLvlGrid = 1.0/self.Rfree*np.sum(
            (np.tile(self.PermGroFac*self.TranShkValsNext,(pLvlCount,1)).transpose()*PermIncNext +
             solution_next.hLvl(PermIncNext)) *
            np.tile(self.ShkPrbsNext,(pLvlCount,1)).transpose(), axis=0)
self.hLvlNow = LinearInterp(np.insert(self.pLvlGrid,0,0.0),np.insert(hLvlGrid,0,0.0))
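        # (Added summary; a hedged reading of the construction above.) hLvlNow
        # is the expected discounted human wealth *level* as a function of
        # current permanent income,
        #   hLvl(p) = (1/Rfree)*E[ PermGroFac*TranShk*pNext + hLvlNext(pNext) ],
        # with pNext = PermShk*p**Corr, evaluated on pLvlGrid and linearly
        # interpolated with hLvl(0) = 0.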
def defBoroCnst(self,BoroCnstArt):
'''
Defines the constrained portion of the consumption function as cFuncNowCnst,
an attribute of self.
Parameters
----------
BoroCnstArt : float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
Returns
-------
None
'''
# Everything is the same as base model except the constrained consumption function has to be 2D
ConsIndShockSetup.defBoroCnst(self,BoroCnstArt)
self.cFuncNowCnst = BilinearInterp(np.array([[0.0,-self.mNrmMinNow],[1.0,1.0-self.mNrmMinNow]]),
np.array([0.0,1.0]),np.array([0.0,1.0]))
# And we also define minimum market resources and natural borrowing limit as a function
self.mLvlMinNow = LinearInterp([0.0,1.0],[0.0,self.mNrmMinNow]) # function of permanent income level
self.BoroCnstNat = LinearInterp([0.0,1.0],[0.0,copy(self.BoroCnstNat)])
def prepareToCalcEndOfPrdvP(self):
'''
Prepare to calculate end-of-period marginal value by creating an array
of market resources that the agent could have next period, considering
the grid of end-of-period normalized assets, the grid of permanent income
levels, and the distribution of shocks he might experience next period.
Parameters
----------
None
Returns
-------
aLvlNow : np.array
2D array of end-of-period assets; also stored as attribute of self.
pLvlNow : np.array
2D array of permanent income levels this period.
'''
if hasattr(self,'PermIncCorr'):
Corr = self.PermIncCorr
else:
Corr = 1.0
ShkCount = self.TranShkValsNext.size
pLvlCount = self.pLvlGrid.size
aNrmCount = self.aXtraGrid.size
pLvlNow = np.tile(self.pLvlGrid,(aNrmCount,1)).transpose()
aLvlNow = np.tile(self.aXtraGrid,(pLvlCount,1))*pLvlNow + self.BoroCnstNat(pLvlNow)
pLvlNow_tiled = np.tile(pLvlNow,(ShkCount,1,1))
aLvlNow_tiled = np.tile(aLvlNow,(ShkCount,1,1)) # shape = (ShkCount,pLvlCount,aNrmCount)
if self.pLvlGrid[0] == 0.0: # aLvl turns out badly if pLvl is 0 at bottom
aLvlNow[0,:] = self.aXtraGrid
aLvlNow_tiled[:,0,:] = np.tile(self.aXtraGrid,(ShkCount,1))
# Tile arrays of the income shocks and put them into useful shapes
PermShkVals_tiled = np.transpose(np.tile(self.PermShkValsNext,(aNrmCount,pLvlCount,1)),(2,1,0))
TranShkVals_tiled = np.transpose(np.tile(self.TranShkValsNext,(aNrmCount,pLvlCount,1)),(2,1,0))
ShkPrbs_tiled = np.transpose(np.tile(self.ShkPrbsNext,(aNrmCount,pLvlCount,1)),(2,1,0))
# Get cash on hand next period
pLvlNext = pLvlNow_tiled**Corr*PermShkVals_tiled*self.PermGroFac
mLvlNext = self.Rfree*aLvlNow_tiled + pLvlNext*TranShkVals_tiled
# Store and report the results
self.ShkPrbs_temp = ShkPrbs_tiled
self.pLvlNext = pLvlNext
self.mLvlNext = mLvlNext
self.aLvlNow = aLvlNow
return aLvlNow, pLvlNow
def calcEndOfPrdvP(self):
'''
Calculates end-of-period marginal value of assets at each state space
point in aLvlNow x pLvlNow. Does so by taking a weighted sum of next
period marginal values across income shocks (in preconstructed grids
self.mLvlNext x self.pLvlNext).
Parameters
----------
None
Returns
-------
EndOfPrdVP : np.array
A 2D array of end-of-period marginal value of assets.
'''
EndOfPrdvP = self.DiscFacEff*self.Rfree*np.sum(self.vPfuncNext(self.mLvlNext,self.pLvlNext)*self.ShkPrbs_temp,axis=0)
return EndOfPrdvP
def makeEndOfPrdvFunc(self,EndOfPrdvP):
'''
Construct the end-of-period value function for this period, storing it
as an attribute of self for use by other methods.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal value of assets corresponding to the
asset values in self.aLvlNow x self.pLvlGrid.
Returns
-------
none
'''
VLvlNext = self.vFuncNext(self.mLvlNext,self.pLvlNext) # value in many possible future states
EndOfPrdv = self.DiscFacEff*np.sum(VLvlNext*self.ShkPrbs_temp,axis=0) # expected value, averaging across states
EndOfPrdvNvrs = self.uinv(EndOfPrdv) # value transformed through inverse utility
EndOfPrdvNvrsP = EndOfPrdvP*self.uinvP(EndOfPrdv)
# Add points at mLvl=zero
EndOfPrdvNvrs = np.concatenate((np.zeros((self.pLvlGrid.size,1)),EndOfPrdvNvrs),axis=1)
if hasattr(self,'MedShkDstn'):
EndOfPrdvNvrsP = np.concatenate((np.zeros((self.pLvlGrid.size,1)),EndOfPrdvNvrsP),axis=1)
else:
EndOfPrdvNvrsP = np.concatenate((np.reshape(EndOfPrdvNvrsP[:,0],(self.pLvlGrid.size,1)),EndOfPrdvNvrsP),axis=1) # This is a very good approximation, vNvrsPP = 0 at the asset minimum
aLvl_temp = np.concatenate((np.reshape(self.BoroCnstNat(self.pLvlGrid),(self.pLvlGrid.size,1)),self.aLvlNow),axis=1)
# Make an end-of-period value function for each permanent income level in the grid
EndOfPrdvNvrsFunc_list = []
for p in range(self.pLvlGrid.size):
EndOfPrdvNvrsFunc_list.append(CubicInterp(aLvl_temp[p,:]-self.BoroCnstNat(self.pLvlGrid[p]),EndOfPrdvNvrs[p,:],EndOfPrdvNvrsP[p,:]))
EndOfPrdvNvrsFuncBase = LinearInterpOnInterp1D(EndOfPrdvNvrsFunc_list,self.pLvlGrid)
# Re-adjust the combined end-of-period value function to account for the natural borrowing constraint shifter
EndOfPrdvNvrsFunc = VariableLowerBoundFunc2D(EndOfPrdvNvrsFuncBase,self.BoroCnstNat)
self.EndOfPrdvFunc = ValueFunc2D(EndOfPrdvNvrsFunc,self.CRRA)
def getPointsForInterpolation(self,EndOfPrdvP,aLvlNow):
'''
Finds endogenous interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
'''
cLvlNow = self.uPinv(EndOfPrdvP)
mLvlNow = cLvlNow + aLvlNow
# Limiting consumption is zero as m approaches mNrmMin
c_for_interpolation = np.concatenate((np.zeros((self.pLvlGrid.size,1)),cLvlNow),axis=-1)
m_for_interpolation = np.concatenate((self.BoroCnstNat(np.reshape(self.pLvlGrid,(self.pLvlGrid.size,1))),mLvlNow),axis=-1)
# Limiting consumption is MPCmin*mLvl as p approaches 0
m_temp = np.reshape(m_for_interpolation[0,:],(1,m_for_interpolation.shape[1]))
m_for_interpolation = np.concatenate((m_temp,m_for_interpolation),axis=0)
c_for_interpolation = np.concatenate((self.MPCminNow*m_temp,c_for_interpolation),axis=0)
return c_for_interpolation, m_for_interpolation
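        # (Added note, hedged.) This is the endogenous grid step: the
        # first-order condition u'(c) = EndOfPrdvP is inverted to get
        #   c = EndOfPrdvP**(-1/CRRA),   m = c + a,
        # and the grid is then padded with the constrained corner (c = 0 at the
        # natural borrowing limit) and the p -> 0 limit where c = MPCmin*m.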
def usePointsForInterpolation(self,cLvl,mLvl,pLvl,interpolator):
'''
Constructs a basic solution for this period, including the consumption
function and marginal value function.
Parameters
----------
cLvl : np.array
Consumption points for interpolation.
mLvl : np.array
Corresponding market resource points for interpolation.
pLvl : np.array
Corresponding permanent income level points for interpolation.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
# Construct the unconstrained consumption function
cFuncNowUnc = interpolator(mLvl,pLvl,cLvl)
# Combine the constrained and unconstrained functions into the true consumption function
cFuncNow = LowerEnvelope2D(cFuncNowUnc,self.cFuncNowCnst)
# Make the marginal value function
vPfuncNow = self.makevPfunc(cFuncNow)
# Pack up the solution and return it
solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow)
return solution_now
def makevPfunc(self,cFunc):
'''
Constructs the marginal value function for this period.
Parameters
----------
cFunc : function
Consumption function this period, defined over market resources and
permanent income level.
Returns
-------
vPfunc : function
Marginal value (of market resources) function for this period.
'''
vPfunc = MargValueFunc2D(cFunc,self.CRRA)
return vPfunc
def makevFunc(self,solution):
'''
Creates the value function for this period, defined over market resources
m and permanent income p. self must have the attribute EndOfPrdvFunc in
order to execute.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
vFuncNow : ValueFunc
A representation of the value function for this period, defined over
market resources m and permanent income p: v = vFuncNow(m,p).
'''
mSize = self.aXtraGrid.size
pSize = self.pLvlGrid.size
# Compute expected value and marginal value on a grid of market resources
pLvl_temp = np.tile(self.pLvlGrid,(mSize,1))
mLvl_temp = np.tile(self.mLvlMinNow(self.pLvlGrid),(mSize,1)) + np.tile(np.reshape(self.aXtraGrid,(mSize,1)),(1,pSize))*pLvl_temp
cLvlNow = solution.cFunc(mLvl_temp,pLvl_temp)
aLvlNow = mLvl_temp - cLvlNow
vNow = self.u(cLvlNow) + self.EndOfPrdvFunc(aLvlNow,pLvl_temp)
vPnow = self.uP(cLvlNow)
# Calculate pseudo-inverse value and its first derivative (wrt mLvl)
vNvrs = self.uinv(vNow) # value transformed through inverse utility
vNvrsP = vPnow*self.uinvP(vNow)
# Add data at the lower bound of m
mLvl_temp = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pSize)),mLvl_temp),axis=0)
vNvrs = np.concatenate((np.zeros((1,pSize)),vNvrs),axis=0)
vNvrsP = np.concatenate((self.MPCmaxEff**(-self.CRRA/(1.0-self.CRRA))*np.ones((1,pSize)),vNvrsP),axis=0)
# Add data at the lower bound of p
MPCminNvrs = self.MPCminNow**(-self.CRRA/(1.0-self.CRRA))
mLvl_temp = np.concatenate((np.reshape(mLvl_temp[:,0],(mSize+1,1)),mLvl_temp),axis=1)
vNvrs = np.concatenate((np.zeros((mSize+1,1)),vNvrs),axis=1)
vNvrsP = np.concatenate((MPCminNvrs*np.ones((mSize+1,1)),vNvrsP),axis=1)
# Construct the pseudo-inverse value function
vNvrsFunc_list = []
for j in range(pSize+1):
pLvl = np.insert(self.pLvlGrid,0,0.0)[j]
vNvrsFunc_list.append(CubicInterp(mLvl_temp[:,j]-self.mLvlMinNow(pLvl),vNvrs[:,j],vNvrsP[:,j],MPCminNvrs*self.hLvlNow(pLvl),MPCminNvrs))
vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_list,np.insert(self.pLvlGrid,0,0.0)) # Value function "shifted"
vNvrsFuncNow = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow)
# "Re-curve" the pseudo-inverse value function into the value function
vFuncNow = ValueFunc2D(vNvrsFuncNow,self.CRRA)
return vFuncNow
def makeBasicSolution(self,EndOfPrdvP,aLvl,pLvl,interpolator):
'''
Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvl : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
pLvl : np.array
Array of permanent income levels that yield the marginal values
in EndOfPrdvP (corresponding pointwise to aLvl).
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
cLvl,mLvl = self.getPointsForInterpolation(EndOfPrdvP,aLvl)
pLvl_temp = np.concatenate((np.reshape(self.pLvlGrid,(self.pLvlGrid.size,1)),pLvl),axis=-1)
pLvl_temp = np.concatenate((np.zeros((1,mLvl.shape[1])),pLvl_temp))
solution_now = self.usePointsForInterpolation(cLvl,mLvl,pLvl_temp,interpolator)
return solution_now
def makeCurvilinearcFunc(self,mLvl,pLvl,cLvl):
'''
Makes a curvilinear interpolation to represent the (unconstrained)
consumption function. No longer used by solver, will be deleted in future.
Parameters
----------
mLvl : np.array
Market resource points for interpolation.
pLvl : np.array
Permanent income level points for interpolation.
cLvl : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : LinearInterp
The unconstrained consumption function for this period.
'''
cFuncUnc = Curvilinear2DInterp(f_values=cLvl.transpose(),x_values=mLvl.transpose(),y_values=pLvl.transpose())
return cFuncUnc
def makeLinearcFunc(self,mLvl,pLvl,cLvl):
'''
Makes a quasi-bilinear interpolation to represent the (unconstrained)
consumption function.
Parameters
----------
mLvl : np.array
Market resource points for interpolation.
pLvl : np.array
Permanent income level points for interpolation.
cLvl : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : LinearInterp
The unconstrained consumption function for this period.
'''
cFunc_by_pLvl_list = [] # list of consumption functions for each pLvl
for j in range(pLvl.shape[0]):
pLvl_j = pLvl[j,0]
m_temp = mLvl[j,:] - self.BoroCnstNat(pLvl_j)
c_temp = cLvl[j,:] # Make a linear consumption function for this pLvl
if pLvl_j > 0:
cFunc_by_pLvl_list.append(LinearInterp(m_temp,c_temp,lower_extrap=True,slope_limit=self.MPCminNow,intercept_limit=self.MPCminNow*self.hLvlNow(pLvl_j)))
else:
cFunc_by_pLvl_list.append(LinearInterp(m_temp,c_temp,lower_extrap=True))
pLvl_list = pLvl[:,0]
cFuncUncBase = LinearInterpOnInterp1D(cFunc_by_pLvl_list,pLvl_list) # Combine all linear cFuncs
cFuncUnc = VariableLowerBoundFunc2D(cFuncUncBase,self.BoroCnstNat) # Re-adjust for natural borrowing constraint (as lower bound)
return cFuncUnc
def makeCubiccFunc(self,mLvl,pLvl,cLvl):
'''
Makes a quasi-cubic spline interpolation of the unconstrained consumption
function for this period. Function is cubic splines with respect to mLvl,
but linear in pLvl.
Parameters
----------
mLvl : np.array
Market resource points for interpolation.
pLvl : np.array
Permanent income level points for interpolation.
cLvl : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : CubicInterp
The unconstrained consumption function for this period.
'''
# Calculate the MPC at each gridpoint
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*np.sum(self.vPPfuncNext(self.mLvlNext,self.pLvlNext)*self.ShkPrbs_temp,axis=0)
dcda = EndOfPrdvPP/self.uPP(np.array(cLvl[1:,1:]))
MPC = dcda/(dcda+1.)
MPC = np.concatenate((self.MPCmaxNow*np.ones((self.pLvlGrid.size,1)),MPC),axis=1)
MPC = np.concatenate((self.MPCminNow*np.ones((1,self.aXtraGrid.size+1)),MPC),axis=0)
# Make cubic consumption function with respect to mLvl for each permanent income level
cFunc_by_pLvl_list = [] # list of consumption functions for each pLvl
for j in range(pLvl.shape[0]):
pLvl_j = pLvl[j,0]
m_temp = mLvl[j,:] - self.BoroCnstNat(pLvl_j)
c_temp = cLvl[j,:] # Make a cubic consumption function for this pLvl
MPC_temp = MPC[j,:]
if pLvl_j > 0:
cFunc_by_pLvl_list.append(CubicInterp(m_temp,c_temp,MPC_temp,lower_extrap=True,slope_limit=self.MPCminNow,intercept_limit=self.MPCminNow*self.hLvlNow(pLvl_j)))
else: # When pLvl=0, cFunc is linear
cFunc_by_pLvl_list.append(LinearInterp(m_temp,c_temp,lower_extrap=True))
pLvl_list = pLvl[:,0]
cFuncUncBase = LinearInterpOnInterp1D(cFunc_by_pLvl_list,pLvl_list) # Combine all linear cFuncs
cFuncUnc = VariableLowerBoundFunc2D(cFuncUncBase,self.BoroCnstNat) # Re-adjust for lower bound of natural borrowing constraint
return cFuncUnc
def addMPCandHumanWealth(self,solution):
'''
Take a solution and add human wealth and the bounding MPCs to it.
Parameters
----------
solution : ConsumerSolution
The solution to this period's consumption-saving problem.
        Returns
        -------
solution : ConsumerSolution
The solution to this period's consumption-saving problem, but now
with human wealth and the bounding MPCs.
'''
solution.hNrm = 0.0 # Can't have None or setAndUpdateValues breaks, should fix
solution.hLvl = self.hLvlNow
solution.mLvlMin= self.mLvlMinNow
solution.MPCmin = self.MPCminNow
solution.MPCmax = self.MPCmaxEff
return solution
def addvPPfunc(self,solution):
'''
Adds the marginal marginal value function to an existing solution, so
that the next solver can evaluate vPP and thus use cubic interpolation.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
solution : ConsumerSolution
The same solution passed as input, but with the marginal marginal
value function for this period added as the attribute vPPfunc.
'''
vPPfuncNow = MargMargValueFunc2D(solution.cFunc,self.CRRA)
solution.vPPfunc = vPPfuncNow
return solution
def solve(self):
'''
Solves a one period consumption saving problem with risky income, with
permanent income explicitly tracked as a state variable.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function (defined over market resources and permanent income), a
marginal value function, bounding MPCs, and human wealth as a func-
tion of permanent income. Might also include a value function and
marginal marginal value function, depending on options selected.
'''
aLvl,pLvl = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
if self.vFuncBool:
self.makeEndOfPrdvFunc(EndOfPrdvP)
if self.CubicBool:
interpolator = self.makeCubiccFunc
else:
interpolator = self.makeLinearcFunc
solution = self.makeBasicSolution(EndOfPrdvP,aLvl,pLvl,interpolator)
solution = self.addMPCandHumanWealth(solution)
if self.vFuncBool:
solution.vFunc = self.makevFunc(solution)
if self.CubicBool:
solution = self.addvPPfunc(solution)
return solution
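# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the solver above; the name is hypothetical):
# the endogenous gridpoint step in getPointsForInterpolation boils down to
# inverting CRRA marginal utility u'(c) = c**(-CRRA) at the end-of-period
# marginal values and then adding assets back to recover market resources.
def _egm_gridpoints_sketch(EndOfPrdvP, aLvl, CRRA=2.0):
    '''Return (cLvl,mLvl) gridpoints implied by EndOfPrdvP on asset grid aLvl.'''
    import numpy as np
    cLvl = np.asarray(EndOfPrdvP)**(-1.0/CRRA)  # c = (u')^{-1}(v_a) for CRRA utility
    mLvl = cLvl + np.asarray(aLvl)              # market resources that rationalize that c
    return cLvl, mLvl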
def solveConsIndShockExplicitPermInc(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool):
'''
Solves the one period problem of a consumer who experiences permanent and
transitory shocks to his income; the permanent income level is tracked as a
state variable rather than normalized out as in ConsIndShock.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
    PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. Currently ignored, with BoroCnstArt=0 used implicitly.
aXtraGrid: np.array
Array of "extra" end-of-period (normalized) asset values-- assets
above the absolute minimum acceptable level.
pLvlGrid: np.array
Array of permanent income levels at which to solve the problem.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function (defined over market resources and permanent income), a
marginal value function, bounding MPCs, and normalized human wealth.
'''
solver = ConsIndShockSolverExplicitPermInc(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool)
solver.prepareToSolve() # Do some preparatory work
solution_now = solver.solve() # Solve the period
return solution_now
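# -----------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, not used above): makeCubiccFunc
# obtains the MPC at each gridpoint by differentiating the first order condition
# u'(c(a)) = v_a(a), giving dc/da = v_aa/u''(c); since m = a + c, the MPC with
# respect to market resources is (dc/da)/(1 + dc/da).
def _mpc_from_curvature_sketch(EndOfPrdvPP, cLvl, CRRA=2.0):
    '''MPC implied by end-of-period v'' and consumption under CRRA utility.'''
    import numpy as np
    uPP = -CRRA*np.asarray(cLvl)**(-CRRA - 1.0)  # u''(c) for CRRA utility
    dcda = np.asarray(EndOfPrdvPP)/uPP
    return dcda/(dcda + 1.0)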
###############################################################################
class ConsPersistentShockSolver(ConsIndShockSolverExplicitPermInc):
'''
A class for solving a consumption-saving problem with transitory and persistent
shocks to income. Transitory shocks are identical to the IndShocks model,
while (log) permanent income follows an AR1 process rather than a random walk.
'''
def __init__(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,PermIncCorr,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool):
'''
Constructor for a new solver for a one period problem with idiosyncratic
shocks to permanent and transitory income. Transitory shocks are iid,
while (log) permanent income follows an AR1 process.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
        PermGroFac : float
Expected permanent income growth factor at the end of this period.
PermIncCorr : float
Correlation of permanent income from period to period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with.
aXtraGrid: np.array
Array of "extra" end-of-period (normalized) asset values-- assets
above the absolute minimum acceptable level.
pLvlGrid: np.array
Array of permanent income levels at which to solve the problem.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
None
'''
ConsIndShockSolverExplicitPermInc.__init__(self,solution_next,IncomeDstn,
LivPrb,DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool)
self.PermIncCorr = PermIncCorr
def defBoroCnst(self,BoroCnstArt):
'''
Defines the constrained portion of the consumption function as cFuncNowCnst,
an attribute of self. Uses the artificial and natural borrowing constraints.
Parameters
----------
BoroCnstArt : float or None
Borrowing constraint for the minimum allowable (normalized) assets
to end the period with. If it is less than the natural borrowing
constraint at a particular permanent income level, then it is irrelevant;
BoroCnstArt=None indicates no artificial borrowing constraint.
Returns
-------
None
'''
# Find minimum allowable end-of-period assets at each permanent income level
PermIncMinNext = self.PermGroFac*self.PermShkMinNext*self.pLvlGrid**self.PermIncCorr
IncLvlMinNext = PermIncMinNext*self.TranShkMinNext
aLvlMin = (self.solution_next.mLvlMin(PermIncMinNext) - IncLvlMinNext)/self.Rfree
# Make a function for the natural borrowing constraint by permanent income
BoroCnstNat = LinearInterp(np.insert(self.pLvlGrid,0,0.0),np.insert(aLvlMin,0,0.0))
self.BoroCnstNat = BoroCnstNat
# Define the constrained portion of the consumption function and the
# minimum allowable level of market resources by permanent income
tempFunc = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),np.array([0.0,1.0]),np.array([0.0,1.0])) # consume everything
cFuncNowCnstNat = VariableLowerBoundFunc2D(tempFunc,BoroCnstNat)
if self.BoroCnstArt is not None:
cFuncNowCnstArt = BilinearInterp(np.array([[0.0,-self.BoroCnstArt],[1.0,1.0-self.BoroCnstArt]]),
np.array([0.0,1.0]),np.array([0.0,1.0]))
self.cFuncNowCnst = LowerEnvelope2D(cFuncNowCnstNat,cFuncNowCnstArt)
self.mLvlMinNow = lambda p : np.maximum(BoroCnstNat(p),self.BoroCnstArt*p)
else:
self.cFuncNowCnst = cFuncNowCnstNat
self.mLvlMinNow = BoroCnstNat
self.mNrmMinNow = 0.0 # Needs to exist so as not to break when solution is created
self.MPCmaxEff = 0.0 # Actually might vary by p, but no use formulating as a function
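# -----------------------------------------------------------------------------
# Illustrative sketch (hypothetical, not called anywhere): with AR(1) persistence
# the worst case for permanent income next period is PermGroFac*PermShkMin*
# pLvl**PermIncCorr, and the natural borrowing limit is the level of assets that
# remains solvent even when that worst income realization arrives, mirroring the
# calculation in defBoroCnst above.
def _natural_borrowing_limit_sketch(pLvl, mLvlMinNext, PermGroFac, PermShkMin,
                                    TranShkMin, Rfree, PermIncCorr=1.0):
    '''Minimum allowable end-of-period assets at permanent income level pLvl.'''
    pLvlMinNext = PermGroFac*PermShkMin*pLvl**PermIncCorr
    IncLvlMinNext = pLvlMinNext*TranShkMin
    return (mLvlMinNext(pLvlMinNext) - IncLvlMinNext)/Rfree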
def solveConsPersistentShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,PermIncCorr,
BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool):
'''
Solves the one period problem of a consumer who experiences permanent and
transitory shocks to his income; transitory shocks are iid, while (log) perm-
anent income follows an AR1 process.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
    PermGroFac : float
Expected permanent income growth factor at the end of this period.
PermIncCorr : float
Correlation of permanent income from period to period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. Currently ignored, with BoroCnstArt=0 used implicitly.
aXtraGrid: np.array
Array of "extra" end-of-period (normalized) asset values-- assets
above the absolute minimum acceptable level.
pLvlGrid: np.array
Array of permanent income levels at which to solve the problem.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function (defined over market resources and permanent income), a
marginal value function, bounding MPCs, and normalized human wealth.
'''
solver = ConsPersistentShockSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,PermIncCorr,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool)
solver.prepareToSolve() # Do some preparatory work
solution_now = solver.solve() # Solve the period
return solution_now
###############################################################################
class IndShockExplicitPermIncConsumerType(IndShockConsumerType):
'''
A consumer type with idiosyncratic shocks to permanent and transitory income.
His problem is defined by a sequence of income distributions, survival prob-
abilities, and permanent income growth rates, as well as time invariant values
for risk aversion, discount factor, the interest rate, the grid of end-of-
period assets, and an artificial borrowing constraint. Identical to the
IndShockConsumerType except that permanent income is tracked as a state
variable rather than normalized out.
'''
cFunc_terminal_ = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),np.array([0.0,1.0]),np.array([0.0,1.0]))
solution_terminal_ = ConsumerSolution(cFunc = cFunc_terminal_, mNrmMin=0.0, hNrm=0.0, MPCmin=1.0, MPCmax=1.0)
poststate_vars_ = ['aLvlNow','pLvlNow']
def __init__(self,cycles=1,time_flow=True,**kwds):
'''
Instantiate a new ConsumerType with given data.
See ConsumerParameters.init_explicit_perm_inc for a dictionary of the
keywords that should be passed to the constructor.
Parameters
----------
cycles : int
Number of times the sequence of periods should be solved.
time_flow : boolean
Whether time is currently "flowing" forward for this instance.
Returns
-------
None
'''
# Initialize a basic ConsumerType
IndShockConsumerType.__init__(self,cycles=cycles,time_flow=time_flow,**kwds)
self.solveOnePeriod = solveConsIndShockExplicitPermInc # idiosyncratic shocks solver with explicit permanent income
def update(self):
'''
Update the income process, the assets grid, the permanent income grid,
and the terminal solution.
Parameters
----------
None
Returns
-------
None
'''
IndShockConsumerType.update(self)
self.updatePermIncGrid()
def updateSolutionTerminal(self):
'''
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
Parameters
----------
None
Returns
-------
None
'''
self.solution_terminal.vFunc = ValueFunc2D(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.vPfunc = MargValueFunc2D(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.vPPfunc = MargMargValueFunc2D(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.hNrm = 0.0 # Don't track normalized human wealth
self.solution_terminal.hLvl = lambda p : np.zeros_like(p) # But do track absolute human wealth by permanent income
self.solution_terminal.mLvlMin = lambda p : np.zeros_like(p) # And minimum allowable market resources by perm inc
def updatePermIncGrid(self):
'''
Update the grid of permanent income levels. Currently only works for
infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not
clear what to do about cycles>1 because the distribution of permanent
income will be different within a period depending on how many cycles
have elapsed.
Parameters
----------
None
Returns
-------
None
'''
if self.cycles == 1:
PermIncStdNow = self.PermIncStdInit # get initial distribution of permanent income
PermIncAvgNow = self.PermIncAvgInit
PermIncGrid = [] # empty list of time-varying permanent income grids
# Calculate distribution of permanent income in each period of lifecycle
for t in range(len(self.PermShkStd)):
PermIncGrid.append(approxLognormal(mu=(np.log(PermIncAvgNow)-0.5*PermIncStdNow**2),
sigma=PermIncStdNow, N=self.PermIncCount, tail_N=self.PermInc_tail_N, tail_bound=[0.05,0.95])[1])
if type(self.PermShkStd[t]) == list:
temp_std = max(self.PermShkStd[t])
temp_fac = max(self.PermGroFac[t])
else:
temp_std = self.PermShkStd[t]
temp_fac = self.PermGroFac[t]
PermIncStdNow = np.sqrt(PermIncStdNow**2 + temp_std**2)
PermIncAvgNow = PermIncAvgNow*temp_fac
# Calculate "stationary" distribution in infinite horizon (might vary across periods of cycle)
elif self.cycles == 0:
assert np.isclose(np.product(self.PermGroFac),1.0), "Long run permanent income growth not allowed!"
CumLivPrb = np.product(self.LivPrb)
CumDeathPrb = 1.0 - CumLivPrb
CumPermShkStd = np.sqrt(np.sum(np.array(self.PermShkStd)**2))
ExPermShkSq = np.exp(CumPermShkStd**2)
ExPermIncSq = CumDeathPrb/(1.0 - CumLivPrb*ExPermShkSq)
PermIncStdNow = np.sqrt(np.log(ExPermIncSq))
PermIncAvgNow = 1.0
PermIncGrid = [] # empty list of time-varying permanent income grids
# Calculate distribution of permanent income in each period of infinite cycle
for t in range(len(self.PermShkStd)):
PermIncGrid.append(approxLognormal(mu=(np.log(PermIncAvgNow)-0.5*PermIncStdNow**2),
sigma=PermIncStdNow, N=self.PermIncCount, tail_N=self.PermInc_tail_N, tail_bound=[0.05,0.95])[1])
if type(self.PermShkStd[t]) == list:
temp_std = max(self.PermShkStd[t])
temp_fac = max(self.PermGroFac[t])
else:
temp_std = self.PermShkStd[t]
temp_fac = self.PermGroFac[t]
PermIncStdNow = np.sqrt(PermIncStdNow**2 + temp_std**2)
PermIncAvgNow = PermIncAvgNow*temp_fac
# Throw an error if cycles>1
else:
assert False, "Can only handle cycles=0 or cycles=1!"
# Store the result and add attribute to time_vary
orig_time = self.time_flow
self.timeFwd()
self.pLvlGrid = PermIncGrid
self.addToTimeVary('pLvlGrid')
if not orig_time:
self.timeRev()
def simBirth(self,which_agents):
'''
Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as
well as time variables t_age and t_cycle. Normalized assets and permanent income levels
are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
'''
# Get and store states for newly born agents
N = np.sum(which_agents) # Number of new consumers to make
aNrmNow_new = drawLognormal(N,mu=self.aNrmInitMean,sigma=self.aNrmInitStd,seed=self.RNG.randint(0,2**31-1))
self.pLvlNow[which_agents] = drawLognormal(N,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=self.RNG.randint(0,2**31-1))
self.aLvlNow[which_agents] = aNrmNow_new*self.pLvlNow[which_agents]
self.t_age[which_agents] = 0 # How many periods since each agent was born
self.t_cycle[which_agents] = 0 # Which period of the cycle each agent is currently in
return None
def getpLvl(self):
'''
Returns the updated permanent income levels for each agent this period.
Parameters
----------
None
Returns
-------
pLvlNow : np.array
Array of size self.AgentCount with updated permanent income levels.
'''
pLvlNow = self.pLvlNow*self.PermShkNow
return pLvlNow
def getStates(self):
'''
Calculates updated values of normalized market resources and permanent income level for each
agent. Uses pLvlNow, aLvlNow, PermShkNow, TranShkNow.
Parameters
----------
None
Returns
-------
None
'''
aLvlPrev = self.aLvlNow
RfreeNow = self.getRfree()
# Calculate new states: normalized market resources and permanent income level
self.pLvlNow = self.getpLvl() # Updated permanent income level
self.bLvlNow = RfreeNow*aLvlPrev # Bank balances before labor income
self.mLvlNow = self.bLvlNow + self.TranShkNow*self.pLvlNow # Market resources after income
return None
def getControls(self):
'''
Calculates consumption for each consumer of this type using the consumption functions.
Parameters
----------
None
Returns
-------
None
'''
cLvlNow = np.zeros(self.AgentCount) + np.nan
for t in range(self.T_cycle):
these = t == self.t_cycle
cLvlNow[these] = self.solution[t].cFunc(self.mLvlNow[these],self.pLvlNow[these])
self.cLvlNow = cLvlNow
return None
def getPostStates(self):
'''
Calculates end-of-period assets for each consumer of this type.
Identical to version in IndShockConsumerType but uses Lvl rather than Nrm variables.
Parameters
----------
None
Returns
-------
None
'''
self.aLvlNow = self.mLvlNow - self.cLvlNow
return None
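# -----------------------------------------------------------------------------
# Illustrative sketch (standalone and hypothetical, assuming flat lists of LivPrb
# and PermShkStd): the "stationary" dispersion used by updatePermIncGrid in the
# infinite horizon case solves E[p**2] = CumDeathPrb*1 + CumLivPrb*E[p**2]*exp(sigma_psi**2),
# because newborns reset to p=1 while survivors accumulate mean-one lognormal
# shocks, and for a mean-one lognormal Var(log p) = log(E[p**2]).
def _stationary_perm_inc_std_sketch(LivPrb, PermShkStd):
    '''Stationary standard deviation of log permanent income with rebirth at p=1.'''
    import numpy as np
    CumLivPrb = np.prod(np.asarray(LivPrb, dtype=float))
    CumDeathPrb = 1.0 - CumLivPrb
    CumPermShkVar = np.sum(np.asarray(PermShkStd, dtype=float)**2)
    ExPermIncSq = CumDeathPrb/(1.0 - CumLivPrb*np.exp(CumPermShkVar))
    return np.sqrt(np.log(ExPermIncSq))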
###############################################################################
class PersistentShockConsumerType(IndShockExplicitPermIncConsumerType):
'''
A consumer type with idiosyncratic shocks to permanent and transitory income.
His problem is defined by a sequence of income distributions, survival prob-
abilities, and permanent income growth rates, as well as time invariant values
for risk aversion, discount factor, the interest rate, the grid of end-of-
period assets, an artificial borrowing constraint, and the correlation
coefficient for (log) permanent income.
'''
def __init__(self,cycles=1,time_flow=True,**kwds):
'''
Instantiate a new ConsumerType with given data.
See ConsumerParameters.init_persistent_shocks for a dictionary of
the keywords that should be passed to the constructor.
Parameters
----------
cycles : int
Number of times the sequence of periods should be solved.
time_flow : boolean
Whether time is currently "flowing" forward for this instance.
Returns
-------
None
'''
# Initialize a basic ConsumerType
IndShockConsumerType.__init__(self,cycles=cycles,time_flow=time_flow,**kwds)
self.solveOnePeriod = solveConsPersistentShock # persistent shocks solver
self.addToTimeInv('PermIncCorr')
def getpLvl(self):
'''
Returns the updated permanent income levels for each agent this period. Identical to version
in IndShockExplicitPermIncConsumerType.getpLvl except that PermIncCorr is used.
Parameters
----------
None
Returns
-------
pLvlNow : np.array
Array of size self.AgentCount with updated permanent income levels.
'''
pLvlNow = self.pLvlNow**self.PermIncCorr*self.PermShkNow
return pLvlNow
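# -----------------------------------------------------------------------------
# Illustrative note (sketch only, hypothetical name): pLvlNow**PermIncCorr*PermShkNow
# above is the level form of an AR(1) in logs, log p' = PermIncCorr*log p + log psi,
# so PermIncCorr=1 recovers the random-walk permanent income of the parent class.
def _ar1_log_income_sketch(pLvl, PermShk, PermIncCorr):
    '''Next-period permanent income level under AR(1) persistence in logs.'''
    import numpy as np
    return np.exp(PermIncCorr*np.log(pLvl) + np.log(PermShk))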
###############################################################################
if __name__ == '__main__':
import ConsumerParameters as Params
from time import clock
import matplotlib.pyplot as plt
mystr = lambda number : "{:.4f}".format(number)
do_simulation = True
# Make and solve an example "explicit permanent income" consumer with idiosyncratic shocks
ExplicitExample = IndShockExplicitPermIncConsumerType(**Params.init_explicit_perm_inc)
#ExplicitExample.cycles = 1
t_start = clock()
ExplicitExample.solve()
t_end = clock()
print('Solving an explicit permanent income consumer took ' + mystr(t_end-t_start) + ' seconds.')
# Plot the consumption function at various permanent income levels
pGrid = np.linspace(0,3,24)
M = np.linspace(0,20,300)
for p in pGrid:
M_temp = M+ExplicitExample.solution[0].mLvlMin(p)
C = ExplicitExample.solution[0].cFunc(M_temp,p*np.ones_like(M_temp))
plt.plot(M_temp,C)
plt.show()
# Plot the value function at various permanent income levels
if ExplicitExample.vFuncBool:
pGrid = np.linspace(0.1,3,24)
M = np.linspace(0.001,5,300)
for p in pGrid:
M_temp = M+ExplicitExample.solution[0].mLvlMin(p)
C = ExplicitExample.solution[0].vFunc(M_temp,p*np.ones_like(M_temp))
plt.plot(M_temp,C)
plt.ylim([-200,0])
plt.show()
# Simulate some data
if do_simulation:
ExplicitExample.T_sim = 500
ExplicitExample.track_vars = ['mLvlNow','cLvlNow','pLvlNow']
ExplicitExample.makeShockHistory() # This is optional
ExplicitExample.initializeSim()
ExplicitExample.simulate()
plt.plot(np.mean(ExplicitExample.mLvlNow_hist,axis=1))
plt.show()
    # Make and solve an example "persistent idiosyncratic shocks" consumer
PersistentExample = PersistentShockConsumerType(**Params.init_persistent_shocks)
#PersistentExample.cycles = 1
t_start = clock()
PersistentExample.solve()
t_end = clock()
print('Solving a persistent income shocks consumer took ' + mystr(t_end-t_start) + ' seconds.')
# Plot the consumption function at various permanent income levels
pGrid = np.linspace(0.1,3,24)
M = np.linspace(0,20,300)
for p in pGrid:
M_temp = M + PersistentExample.solution[0].mLvlMin(p)
C = PersistentExample.solution[0].cFunc(M_temp,p*np.ones_like(M_temp))
plt.plot(M_temp,C)
plt.show()
# Plot the value function at various permanent income levels
if PersistentExample.vFuncBool:
pGrid = np.linspace(0.1,3,24)
M = np.linspace(0.001,5,300)
for p in pGrid:
M_temp = M+PersistentExample.solution[0].mLvlMin(p)
C = PersistentExample.solution[0].vFunc(M_temp,p*np.ones_like(M_temp))
plt.plot(M_temp,C)
plt.ylim([-200,0])
plt.show()
# Simulate some data
if do_simulation:
PersistentExample.T_sim = 500
PersistentExample.track_vars = ['mLvlNow','cLvlNow','pLvlNow']
PersistentExample.initializeSim()
PersistentExample.simulate()
plt.plot(np.mean(PersistentExample.mLvlNow_hist,axis=1))
plt.show() | apache-2.0 |
pradyu1993/scikit-learn | sklearn/utils/tests/test_extmath.py | 1 | 7264 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.utils.extmath import weighted_mode
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
    X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_true(np.all(mode == mode_result))
assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
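# A minimal reference sketch (not part of the test suite): logsumexp avoids
# overflow/underflow by shifting by the maximum before exponentiating,
# log(sum_i exp(a_i)) = m + log(sum_i exp(a_i - m)) with m = max_i a_i.
# For example, _logsumexp_reference([-1000., -1000.]) stays finite even though
# np.exp(-1000.) underflows to zero.
def _logsumexp_reference(a):
    a = np.asarray(a)
    m = np.max(a)
    return m + np.log(np.sum(np.exp(a - m)))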
def test_randomized_svd_low_rank():
"""Check that extmath.randomized_svd is consistent with linalg.svd"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0, random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_randomized_svd_low_rank_with_noise():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5, random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
    # the iterated power method helps to get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0, random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
"""Check that transposing the design matrix has limit impact"""
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5, random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
| bsd-3-clause |
BarbaraMcG/survey-mining | create_graph_27112016.py | 1 | 2573 | # Module for creating graphs from responses and their keywords
#import plotly.plotly as py
#from plotly.graph_objs import *
import networkx as nx
import matplotlib.pyplot as plt
import time
import os
import csv
def create_graph(input_path, input_file, output_path, output_file, min_freq):
# Create empty graph:
G = nx.Graph()
# Populate the graph with the keywords/clusters as nodes:
id_response = ""
keyword_nodes = list()
kw2response = dict() # maps a kw to the list of responses containing it
with open(os.path.join(input_path, input_file), 'rb') as csv_file:
next(csv_file)
res_reader = csv.reader(csv_file, delimiter=',')
my_count = 0
for row in res_reader:
id_response = row[0]
kw = row[2]
keyword_nodes.append(kw)
if kw in kw2response:
responses_for_kw = kw2response[kw]
responses_for_kw.append(id_response)
kw2response[kw] = responses_for_kw
else:
kw2response[kw] = [id_response]
keyword_nodes = list(set(keyword_nodes))
keyword_nodes.sort()
    # only keep keywords that occur in at least min_freq + 1 responses
    # (build a new list instead of removing from keyword_nodes while iterating
    # over it, which would silently skip elements)
    keyword_nodes = [kw for kw in keyword_nodes
                     if len(kw2response[kw]) >= min_freq + 1]
for kw in keyword_nodes:
G.add_node(kw)
#print(str(G.nodes()))
# Add edges to graph:
# two nodes (keywords) are connected if they appear in the same response:
for kw1 in keyword_nodes:
responses_kw1 = kw2response[kw1]
for kw2 in keyword_nodes:
responses_kw2 = kw2response[kw2]
overlap = len(list(set(responses_kw1).intersection(responses_kw2)))
if overlap > 0:
                G.add_edge(kw1, kw2, weight=overlap**2)  # square the shared-response count ('^' would be bitwise XOR)
#print(str(G.edges()))
# Plot graph:
pos = nx.spring_layout(G) # positions for all nodes
edges = G.edges()
weights = [G[u][v]['weight'] for u,v in edges]
nx.draw(G, pos, edges = edges, width=weights, with_labels = True)
#plt.show()
plt.savefig(os.path.join(output_path, output_file))
#create_graph("C:\Users\Barbara\Dropbox\Resofact\data\output\Telkom_pride_embarassment\Telkom_pride", "Responses_keywords_pride_23102016_test.csv", "C:\Users\Barbara\Dropbox\Resofact\code\plots", 'graph_pride_' + time.strftime("%d%m%Y") + ".png")
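# Illustrative sketch (standalone, hypothetical name): the edge weights above are
# driven by how many responses a pair of keywords shares, which can be read
# straight off the keyword -> responses mapping built inside create_graph.
def cooccurrence_count(kw2response, kw1, kw2):
    '''Number of responses that mention both kw1 and kw2.'''
    return len(set(kw2response[kw1]) & set(kw2response[kw2]))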
| gpl-3.0 |
gustfrontar/LETKF_WRF | wrf/obs/radar/radar_qc_cfradial/test/test_radar_qc.py | 1 | 3276 | #print __doc__
# Author: Rapid Refresh Argentina Team
# License: BSD 3 clause
# History: Created 10-2017
#Add path to additional modules.
import sys
sys.path.append('./src/python' )
sys.path.append('./src/fortran')
import numpy as np
import matplotlib.pyplot as plt
import pyart
import netCDF4
import radar_qc_module as rqc
#======================================
# QC PARAMETERS
#======================================
options = {}
#Flags
options['ifdealias']=False
options['ifrhofilter']=False #Rhohv filter
options['ifattfilter']=False #Attenuation filter
options['ifetfilter']=True #Echo top filter
options['ifedfilter']=False #Echo depth filter
options['ifspfilter']=False #Speckle filter
options['ifblfilter']=False #Blocking filter
options['ifmissfilter']=False #Missing values filter
#General
options['ref_name']='dBZ' #Reflectivity
options['cref_name']='CdBZ' #Corrected reflectivity (qc output)
options['v_name']='V' #Dopper velocity
options['cv_name']='CV' #Corrected wind (qc output)
options['rho_name']='RhoHV' #Rho HV
options['norainrefval']=-0.1
options['undef']=-9.99e9
#Dealiasing parameters (pyart)
options['interval_split']=3
options['skip_between_rays']=10
options['skip_along_rays']=10
#Rho filter parameters
options['rhofilternx']=2
options['rhofilterny']=2
options['rhofilternz']=0
options['rhofiltertr']=0.5
#Echo top parameters #Filter layers with echo top below a certain threshold.
options['etfilternx']=2 #Smooth parameter in x
options['etfilterny']=2 #Smooth parameter in y
options['etfilternz']=0 #Smooth parameter in z (dummy)
options['etfiltertr']=3000 #Echo top threshold.
options['etfilter_save']=True #Whether echo top will be included in the output
#Echo depth parameters #Filters layers with depths below a certain threshold.
options['edfilternx']=2 #Smooth parameter in x
options['edfilterny']=2 #Smooth parameter in y
options['edfilternz']=0 #Smooth parameter in z (dummy)
options['edfiltertr']=3000 #Echo depth threshold.
options['edfilter_save']=True #Whether echo depth will be included in the output
#Speckle parameters
options['spfilternx']=2 #Box size in X NX=(2*spfilternx + 1)
options['spfilterny']=2 #Box size in Y
options['spfilternz']=0 #Box size in Z
options['spfilterreftr']=5 #Reflectivity threshold
options['spfiltertr']=0.3 #Count threshold
options['spfilter_save']=True #Save filter fields.
#Attenuation parameters
options['attfiltertr']=20.0 #Attenuation threshold in dBZ
options['attcalerror']=1.0 #Calibration error
options['attfilter_save']=True #Save filter fields
#Blocking parameters
#Detect missing parameters
#=======================================
# Read in the cfradial file and run the quality control
filename = '/media/jruiz/PAWR/Dropbox/DATA/DATOS_RADAR/PARANA/PAR_20091117_120/cfrad.20091117_210346.000_to_20091117_210735.000_PAR_SUR.nc'
#Performs QC operations based on options
[radar , qc_output] = rqc.main_qc( filename , options )
print('End of QC')
qc_output['echo_top'][ qc_output['echo_top'] == options['undef'] ] = 0.0
plt.figure()
plt.pcolor(qc_output['echo_top'][:,:,1])
plt.show()
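# -----------------------------------------------------------------------------
# Illustrative sketch (hypothetical, independent of radar_qc_module): an echo
# top filter like the one enabled above conceptually masks reflectivity in
# columns whose echo-top height falls below the threshold (3000 m here),
# replacing it with the no-rain value.
def echo_top_mask_sketch(ref, echo_top, threshold=3000.0, norainval=-0.1):
    '''Set reflectivity to the no-rain value wherever echo top < threshold.'''
    import numpy as np
    ref = np.array(ref, dtype=float, copy=True)
    ref[np.asarray(echo_top) < threshold] = norainval
    return ref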
| gpl-3.0 |
zihua/scikit-learn | benchmarks/bench_isolation_forest.py | 46 | 3782 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = X.shape
n_samples_train = n_samples // 2
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = - model.decision_function(X_test) # the lower, the more normal
# Show score histograms
fig, ax = plt.subplots(3, sharex=True, sharey=True)
bins = np.linspace(-0.5, 0.5, 200)
ax[0].hist(scoring, bins, color='black')
ax[0].set_title('decision function for %s dataset' % dat)
ax[0].legend(loc="lower right")
ax[1].hist(scoring[y_test == 0], bins, color='b',
label='normal data')
ax[1].legend(loc="lower right")
ax[2].hist(scoring[y_test == 1], bins, color='r',
label='outliers')
ax[2].legend(loc="lower right")
# Show ROC Curves
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
label = ('%s (area: %0.3f, train-time: %0.2fs, '
'test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
ax_roc.plot(fpr, tpr, lw=1, label=label)
ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel('False Positive Rate')
ax_roc.set_ylabel('True Positive Rate')
ax_roc.set_title('Receiver operating characteristic (ROC) curves')
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
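# A minimal, self-contained sketch of the same evaluation recipe on synthetic
# data (not one of the benchmark datasets above); the sign flip makes higher
# scores mean "more anomalous" so that roc_curve treats outliers as positives.
def _toy_isolation_forest_auc(random_state=0):
    rng = np.random.RandomState(random_state)
    X_inliers = 0.3*rng.randn(500, 2)
    X_outliers = rng.uniform(low=-4, high=4, size=(50, 2))
    X_all = np.r_[X_inliers, X_outliers]
    y_all = np.r_[np.zeros(500), np.ones(50)]
    clf = IsolationForest(random_state=random_state).fit(X_inliers)
    scores = -clf.decision_function(X_all)
    fpr_toy, tpr_toy, _ = roc_curve(y_all, scores)
    return auc(fpr_toy, tpr_toy)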
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 30 | 1812 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
"""
Test BayesianRidge on diabetes
"""
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
"""
Test BayesianRidge on toy
"""
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
"""
Test BayesianRegression ARD classifier
"""
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
pandeyadarsh/sympy | sympy/plotting/plot_implicit.py | 83 | 14400 | """Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval arithmetic.
It is also possible to specify to use the fall back algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points,
line_color):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality # If the expression has equality, i.e.
#Eq, Greaterthan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
self.line_color = line_color
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
        #Call refine_pixels repeatedly; it subdivides the intervals that are
        #neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#The expression is valid in the interval. Change the contour
#array values to 1.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
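# -----------------------------------------------------------------------------
# Illustrative sketch (not used by the plotting machinery above): the uniform
# mesh fallback reduces to evaluating the expression on a grid and keeping only
# its sign, so that the 0-level contour of the resulting array approximates the
# implicit curve.  The helper below is a standalone, hypothetical illustration
# for a plain numeric callable.
def _sign_grid_sketch(func, xlim, ylim, n=200):
    """Return meshgrid arrays and the sign of func evaluated on them."""
    np = import_module('numpy')
    x = np.linspace(xlim[0], xlim[1], n)
    y = np.linspace(ylim[0], ylim[1], n)
    x_grid, y_grid = np.meshgrid(x, y)
    return x_grid, y_grid, np.sign(func(x_grid, y_grid))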
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 200.
- ``title`` string .The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
Aesthetics options:
- ``line_color``: float or string. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
plot_implicit, by default, uses interval arithmetic to plot functions. If
the expression cannot be plotted using interval arithmetic, it defaults to
    generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
grid. The mesh grid method can be effective when adaptive plotting using
interval arithmetic, fails to plot with small line width.
Examples
========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
                          # GreaterThan or LessThan
def arg_expand(bool_expr):
"""
        Recursively expands the arguments of a Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
line_color = kwargs.pop('line_color', "blue")
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points, line_color)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
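# Illustrative usage sketch (not part of the original module); assumes a working
# matplotlib backend and a hypothetical output filename:
#
#     from sympy import symbols, Eq, plot_implicit
#     x, y = symbols('x y')
#     p = plot_implicit(Eq(x**2 + y**2, 4), (x, -3, 3), (y, -3, 3), show=False)
#     p.save('circle.png')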
| bsd-3-clause |
jnaulty/cloudbrain | tests/unit/fft_test.py | 2 | 6080 | import numpy as np
import unittest
from cloudbrain.modules.transforms.fft import FrequencyBandTransformer
def generate_sine_wave(number_points, sampling_frequency, alpha_amplitude, alpha_freq,
beta_amplitude, beta_freq):
sample_spacing = 1.0 / sampling_frequency
x = np.linspace(start=0.0, stop=number_points * sample_spacing, num=number_points)
alpha = alpha_amplitude * np.sin(alpha_freq * 2.0 * np.pi * x)
beta = beta_amplitude * np.sin(beta_freq * 2.0 * np.pi * x)
import random
y = alpha + beta + min(alpha_amplitude, beta_amplitude) / 2.0 * random.random()
return y
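# Illustrative call of the helper above (values mirror the defaults used in the
# test case below; not part of the original test module):
#
#     y = generate_sine_wave(number_points=250, sampling_frequency=250.0,
#                            alpha_amplitude=10.0, alpha_freq=10.0,
#                            beta_amplitude=5.0, beta_freq=25.0)
#     # y is a numpy array of 250 samples covering roughly one second of signal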
def generate_mock_data(num_channels, number_points, sampling_frequency, buffer_size,
alpha_amplitude, alpha_freq, beta_amplitude, beta_freq):
sine_wave = generate_sine_wave(number_points, sampling_frequency, alpha_amplitude, alpha_freq,
beta_amplitude, beta_freq)
buffers = []
# if number_points = 250 and buffer_size = 40, then num_buffers = 6 + 1
num_buffers = number_points / buffer_size
if number_points % buffer_size != 0:
num_buffers += 1 # you need to account for the almost full buffer
t0 = 0
sample_spacing = 1.0 / sampling_frequency
for i in range(num_buffers):
if (i + 1) * buffer_size < len(sine_wave):
y_chunk = sine_wave[i * buffer_size:(i + 1) * buffer_size]
else:
y_chunk = sine_wave[i * buffer_size:]
buffer = []
for j in range(len(y_chunk)):
timestamp_in_s = t0 + (buffer_size * i + j) * sample_spacing
timestamp = int(timestamp_in_s * 1000)
datapoint = {'timestamp': timestamp}
for k in range(num_channels):
datapoint['channel_%s' % k] = y_chunk[j]
buffer.append(datapoint)
buffers.append(buffer)
return buffers
def plot_cb_buffers(num_channels, cb_buffers):
maxi_buffer = []
for cb_buffer in cb_buffers:
maxi_buffer.extend(cb_buffer)
plot_cb_buffer(num_channels, maxi_buffer)
def plot_cb_buffer(num_channels, cb_buffer):
import matplotlib.pyplot as plt
f, axarr = plt.subplots(num_channels)
for i in range(num_channels):
channel_name = 'channel_%s' % i
data_to_plot = []
for data in cb_buffer:
data_to_plot.append(data[channel_name])
axarr[i].plot(data_to_plot)
axarr[i].set_title(channel_name)
plt.show()
def generate_frequency_bands(alpha_freq, beta_freq, frequency_band_size):
"""
Make sure to choose the frequency band size carefully.
If it is too high, frequency bands will overlap.
If it's too low, the band might not be large enough to detect the frequency.
"""
alpha_range = [alpha_freq - frequency_band_size / 2.0, alpha_freq + frequency_band_size / 2.0]
beta_range = [beta_freq - frequency_band_size / 2.0, beta_freq + frequency_band_size / 2.0]
frequency_bands = {'alpha': alpha_range, 'beta': beta_range}
return frequency_bands
class FrequencyBandTranformerTest(unittest.TestCase):
def setUp(self):
self.plot_input_data = False
        self.window_size = 250 # Also OK => 2 * 250. Or 3 * 250
self.sampling_frequency = 250.0
self.buffer_size = 10
self.alpha_amplitude = 10.0
self.alpha_freq = 10.0
self.beta_amplitude = 5.0
self.beta_freq = 25.0
self.num_channels = 8
self.frequency_band_size = 10.0
self.number_points = 250
self.cb_buffers = generate_mock_data(self.num_channels, self.number_points,
self.sampling_frequency,
self.buffer_size, self.alpha_amplitude,
self.alpha_freq,
self.beta_amplitude, self.beta_freq)
def test_cb_buffers(self):
if self.plot_input_data:
plot_cb_buffers(self.num_channels, self.cb_buffers)
num_buffers = self.number_points / self.buffer_size
if self.window_size % self.buffer_size != 0:
num_buffers += 1
self.assertEqual(len(self.cb_buffers), num_buffers)
self.assertEqual(len(self.cb_buffers[0]), self.buffer_size)
self.assertTrue('channel_0' in self.cb_buffers[0][0].keys())
def test_module(self):
self.frequency_bands = generate_frequency_bands(self.alpha_freq, self.beta_freq,
self.frequency_band_size)
self.subscribers = []
self.publishers = []
module = FrequencyBandTransformer(subscribers=self.subscribers, publishers=self.publishers,
window_size=self.window_size,
sampling_frequency=self.sampling_frequency,
frequency_bands=self.frequency_bands)
for cb_buffer in self.cb_buffers:
# where the real logic inside the subscriber takes place
bands = module._compute_fft(cb_buffer, self.num_channels)
if bands:
for i in range(self.num_channels):
channel_name = 'channel_%s' % i
alpha_estimated_amplitude = bands['alpha'][channel_name]
beta_estimated_amplitude = bands['beta'][channel_name]
ratio = self.beta_amplitude / self.alpha_amplitude
estimated_ratio = beta_estimated_amplitude / alpha_estimated_amplitude
print "Alpha: estimated=%s | actual=%s" % (alpha_estimated_amplitude,
self.alpha_amplitude)
print "Beta: estimated=%s | actual=%s" % (beta_estimated_amplitude,
self.beta_amplitude)
assert np.abs((estimated_ratio - ratio) / ratio) < 0.01
| agpl-3.0 |
andrewhart098/Lean | Algorithm.Python/DropboxUniverseSelectionAlgorithm.py | 1 | 3411 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
from datetime import datetime
import decimal as d
import pandas as pd
### <summary>
### In this algorithm we show how you can easily use the universe selection feature to fetch symbols
### to be traded using the BaseData custom data system in combination with the AddUniverse{T} method.
### AddUniverse{T} requires a function that will return the symbols to be traded.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="custom universes" />
class DropboxUniverseSelectionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2013,1,1)
self.SetEndDate(2013,12,31)
self.backtestSymbolsPerDay = None
        self.current_universe = []
        # initialize changes to None so OnData can safely check it before the
        # first OnSecuritiesChanged call
        self.changes = None
        self.UniverseSettings.Resolution = Resolution.Daily
self.AddUniverse("my-dropbox-universe", self.selector)
def selector(self, data):
# handle live mode file format
if self.LiveMode:
# fetch the file from dropbox
url = "https://www.dropbox.com/s/2az14r5xbx4w5j6/daily-stock-picker-live.csv?dl=1"
df = pd.read_csv(url, header = None)
# if we have a file for today, return symbols
if not df.empty:
self.current_universe = df.iloc[0,:].tolist()
# no symbol today, leave universe unchanged
return self.current_universe
# backtest - first cache the entire file
if self.backtestSymbolsPerDay is None:
url = "https://www.dropbox.com/s/rmiiktz0ntpff3a/daily-stock-picker-backtest.csv?dl=1"
self.backtestSymbolsPerDay = pd.read_csv(url, header = None, index_col = 0)
index = int(data.strftime("%Y%m%d"))
if index in self.backtestSymbolsPerDay.index:
self.current_universe = self.backtestSymbolsPerDay.loc[index,:].dropna().tolist()
return self.current_universe
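    # Illustrative shape of the backtest CSV consumed above, derived from the
    # parsing logic (a yyyymmdd index column followed by symbol columns); the
    # tickers shown are hypothetical:
    #
    #     20130102,AAPL,MSFT,GOOG
    #     20130103,AAPL,IBM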
def OnData(self, slice):
if slice.Bars.Count == 0: return
if self.changes == None: return
# start fresh
self.Liquidate()
percentage = 1 / d.Decimal(slice.Bars.Count)
for tradeBar in slice.Bars.Values:
self.SetHoldings(tradeBar.Symbol, percentage)
# reset changes
self.changes = None
def OnSecuritiesChanged(self, changes):
self.changes = changes | apache-2.0 |
gritlogic/incubator-airflow | airflow/hooks/presto_hook.py | 24 | 3472 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
import logging
from pyhive import presto
from pyhive.exc import DatabaseError
from airflow.hooks.dbapi_hook import DbApiHook
logging.getLogger("pyhive").setLevel(logging.INFO)
class PrestoException(Exception):
pass
class PrestoHook(DbApiHook):
"""
Interact with Presto through PyHive!
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id)
return presto.connect(
host=db.host,
port=db.port,
username=db.login,
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema)
@staticmethod
def _strip_sql(sql):
return sql.strip().rstrip(';')
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super(PrestoHook, self).get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
if (hasattr(e, 'message') and
'errorName' in e.message and
'message' in e.message):
# Use the structured error data in the raised exception
raise PrestoException('{name}: {message}'.format(
name=e.message['errorName'], message=e.message['message']))
else:
raise PrestoException(str(e))
def get_first(self, hql, parameters=None):
"""
Returns only the first row, regardless of how many rows the query
returns.
"""
try:
return super(PrestoHook, self).get_first(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(e[0]['message'])
def get_pandas_df(self, hql, parameters=None):
"""
Get a pandas dataframe from a sql query.
"""
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(hql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e[0]['message'])
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame()
return df
def run(self, hql, parameters=None):
"""
Execute the statement against Presto. Can be used to create views.
"""
return super(PrestoHook, self).run(self._strip_sql(hql), parameters)
def insert_rows(self):
raise NotImplementedError()
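# Illustrative usage sketch (not part of the hook): assumes an Airflow
# connection named ``presto_default`` that points at a reachable Presto
# coordinator, and reuses the example table from the class docstring.
#
#     hook = PrestoHook(presto_conn_id='presto_default')
#     records = hook.get_records("SELECT count(1) AS num FROM airflow.static_babynames")
#     df = hook.get_pandas_df("SELECT * FROM airflow.static_babynames LIMIT 10")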
| apache-2.0 |
ran5515/DeepDecision | tensorflow/examples/learn/multiple_gpu.py | 13 | 4153 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: Dict of input `Tensor`.
labels: Label `Tensor`.
mode: One of `ModeKeys`.
Returns:
`EstimatorSpec`.
"""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
with tf.device('/gpu:1'):
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
with tf.device('/gpu:2'):
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Convert the labels to a one-hot tensor of shape (length of features, 3)
  # and with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
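# Illustrative variation (an assumption, not part of the original example): if
# fewer than three devices are available, TensorFlow 1.x can be allowed to fall
# back to existing devices by giving the Estimator a soft-placement config:
#
#     session_config = tf.ConfigProto(allow_soft_placement=True)
#     run_config = tf.estimator.RunConfig(session_config=session_config)
#     classifier = tf.estimator.Estimator(model_fn=my_model, config=run_config)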
| apache-2.0 |
SGenheden/Scripts | Projects/Gpcr/gpcr_anal_exposure.py | 1 | 2615 | # Author: Samuel Genheden [email protected]
"""
Program to analyse the surface exposure of residues in GPCR structures
Structures are taken from standard locations; the folder with the residue contacts is given with the -f argument
"""
import argparse
import os
import sys
import numpy as np
import scipy.stats as stats
import matplotlib
if not "DISPLAY" in os.environ or os.environ["DISPLAY"] == "" :
matplotlib.use('Agg')
import matplotlib.pylab as plt
import gpcr_lib
# Import the calc_surf program
thispath = os.path.dirname(os.path.abspath(__file__))
oneup = os.path.split(thispath)[0]
sys.path.insert(0,oneup)
import calc_surf
if __name__ == '__main__' :
# Command-line input
parser = argparse.ArgumentParser(description="Analysing residue exposure")
parser.add_argument('-f','--folder', help="the folder with the residue contacts")
parser.add_argument('-p','--probe',type=float,help="the probe size",default=2.4)
parser.add_argument('-c','--percentile',type=float,help="the percentile to consider",default=50)
args = parser.parse_args()
mols = "b2 b2_a a2a a2a_a".split()
data = {}
for mol in mols:
xray, aastruct = gpcr_lib.load_xray(mol, loadsigma=True, loadaa=True)
radii = np.asarray([calc_surf.bornradii[atom.element().upper()] for atom in aastruct.atoms])
xyzrname = calc_surf.write_xyzr(aastruct.xyz,radii)
surf = calc_surf.calc_surf(xyzrname, aastruct.xyz.shape[0], args.probe)
res, contactprob = gpcr_lib.read_rescontacts(args.folder, mol, percentile=args.percentile, returnprobs=True)
print len(res)
mdata = {}
for tres, s in zip(xray.template.residues, surf):
if tres in res :
mdata[tres] = (s[0], contactprob[res==tres][0])
data[mol] = mdata
with open("ses_%s"%mol,"w") as f :
for s, r in zip(surf,xray.template.residues):
f.write("%d %.3f\n"%(r, s[0]))
diff = []
for res in data["b2"]:
if res in data["b2_a"]:
diff.append([data["b2_a"][res][0]-data["b2"][res][0],
data["b2_a"][res][1]-data["b2"][res][1]])
diff = np.asarray(diff)
print "B2: %.3f %.3f"%(np.corrcoef(diff[:,0],diff[:,1])[1,0],
stats.kendalltau(diff[:,0],diff[:,1])[0])
diff = []
for res in data["a2a"]:
if res in data["a2a_a"]:
diff.append([data["a2a_a"][res][0]-data["a2a"][res][0],
data["a2a_a"][res][1]-data["a2a"][res][1]])
diff = np.asarray(diff)
print "A2a: %.3f %.3f"%(np.corrcoef(diff[:,0],diff[:,1])[1,0],
stats.kendalltau(diff[:,0],diff[:,1])[0])
| mit |
zak-k/cartopy | lib/cartopy/examples/reprojected_wmts.py | 1 | 1931 | __tags__ = ['Web services']
"""
Displaying WMTS tiled map data on an arbitrary projection
---------------------------------------------------------
This example displays imagery from a web map tile service on two different
projections, one of which is not provided by the service.
This result can also be interactively panned and zoomed.
The example WMTS layer is a single composite of data sampled over nine days
in April 2012 and thirteen days in October 2012 showing the Earth at night.
It does not vary over time.
The imagery was collected by the Suomi National Polar-orbiting Partnership
(Suomi NPP) weather satellite operated by the United States National Oceanic
and Atmospheric Administration (NOAA).
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
def plot_city_lights():
# Define resource for the NASA night-time illumination data.
base_uri = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
layer_name = 'VIIRS_CityLights_2012'
# Create a Cartopy crs for plain and rotated lat-lon projections.
plain_crs = ccrs.PlateCarree()
rotated_crs = ccrs.RotatedPole(pole_longitude=120.0, pole_latitude=45.0)
# Plot WMTS data in a specific region, over a plain lat-lon map.
ax = plt.subplot(121, projection=plain_crs)
ax.set_extent((-6, 3, 48, 58), crs=ccrs.PlateCarree())
ax.coastlines(resolution='50m', color='yellow')
ax.gridlines(color='lightgrey', linestyle='-')
# Add WMTS imaging.
ax.add_wmts(base_uri, layer_name=layer_name)
# Plot WMTS data on a rotated map, over the same nominal region.
ax = plt.subplot(122, projection=rotated_crs)
ax.set_extent((-6, 3, 48, 58), crs=plain_crs)
ax.coastlines(resolution='50m', color='yellow')
ax.gridlines(color='lightgrey', linestyle='-')
# Add WMTS imaging.
ax.add_wmts(base_uri, layer_name=layer_name)
plt.show()
if __name__ == '__main__':
plot_city_lights()
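# Illustrative variation (not part of the original example): to write the
# figure to disk instead of opening an interactive window, replace plt.show()
# inside plot_city_lights() with something like
#
#     plt.savefig('city_lights_comparison.png', dpi=150, bbox_inches='tight')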
| lgpl-3.0 |
stevenzhang18/Indeed-Flask | lib/pandas/stats/plm.py | 14 | 24672 | """
Linear regression objects for panel data
"""
# pylint: disable-msg=W0231
# pylint: disable-msg=E1101,E1103
from __future__ import division
from pandas.compat import range
from pandas import compat
import warnings
import numpy as np
from pandas.core.panel import Panel
from pandas.core.frame import DataFrame
from pandas.core.reshape import get_dummies
from pandas.core.series import Series
from pandas.core.sparse import SparsePanel
from pandas.stats.ols import OLS, MovingOLS
import pandas.stats.common as com
import pandas.stats.math as math
from pandas.util.decorators import cache_readonly
class PanelOLS(OLS):
"""Implements panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None, intercept=True, nw_lags=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False,
nw_overlap=False):
self._x_orig = x
self._y_orig = y
self._weights = weights
self._intercept = intercept
self._nw_lags = nw_lags
self._nw_overlap = nw_overlap
self._entity_effects = entity_effects
self._time_effects = time_effects
self._x_effects = x_effects
self._dropped_dummies = dropped_dummies or {}
self._cluster = com._get_cluster_type(cluster)
self._verbose = verbose
(self._x, self._x_trans,
self._x_filtered, self._y,
self._y_trans) = self._prepare_data()
self._index = self._x.index.levels[0]
self._T = len(self._index)
def log(self, msg):
if self._verbose: # pragma: no cover
print(msg)
def _prepare_data(self):
"""Cleans and stacks input data into DataFrame objects
If time effects is True, then we turn off intercepts and omit an item
from every (entity and x) fixed effect.
Otherwise:
- If we have an intercept, we omit an item from every fixed effect.
- Else, we omit an item from every fixed effect except one of them.
The categorical variables will get dropped from x.
"""
(x, x_filtered, y, weights, cat_mapping) = self._filter_data()
self.log('Adding dummies to X variables')
x = self._add_dummies(x, cat_mapping)
self.log('Adding dummies to filtered X variables')
x_filtered = self._add_dummies(x_filtered, cat_mapping)
if self._x_effects:
x = x.drop(self._x_effects, axis=1)
x_filtered = x_filtered.drop(self._x_effects, axis=1)
if self._time_effects:
x_regressor = x.sub(x.mean(level=0), level=0)
unstacked_y = y.unstack()
y_regressor = unstacked_y.sub(unstacked_y.mean(1), axis=0).stack()
y_regressor.index = y.index
elif self._intercept:
# only add intercept when no time effects
self.log('Adding intercept')
x = x_regressor = add_intercept(x)
x_filtered = add_intercept(x_filtered)
y_regressor = y
else:
self.log('No intercept added')
x_regressor = x
y_regressor = y
if weights is not None:
if not y_regressor.index.equals(weights.index):
raise AssertionError("y_regressor and weights must have the "
"same index")
if not x_regressor.index.equals(weights.index):
raise AssertionError("x_regressor and weights must have the "
"same index")
rt_weights = np.sqrt(weights)
y_regressor = y_regressor * rt_weights
x_regressor = x_regressor.mul(rt_weights, axis=0)
return x, x_regressor, x_filtered, y, y_regressor
def _filter_data(self):
"""
"""
data = self._x_orig
cat_mapping = {}
if isinstance(data, DataFrame):
data = data.to_panel()
else:
if isinstance(data, Panel):
data = data.copy()
if not isinstance(data, SparsePanel):
data, cat_mapping = self._convert_x(data)
if not isinstance(data, Panel):
data = Panel.from_dict(data, intersect=True)
x_names = data.items
if self._weights is not None:
data['__weights__'] = self._weights
# Filter x's without y (so we can make a prediction)
filtered = data.to_frame()
# Filter all data together using to_frame
# convert to DataFrame
y = self._y_orig
if isinstance(y, Series):
y = y.unstack()
data['__y__'] = y
data_long = data.to_frame()
x_filt = filtered.filter(x_names)
x = data_long.filter(x_names)
y = data_long['__y__']
if self._weights is not None and not self._weights.empty:
weights = data_long['__weights__']
else:
weights = None
return x, x_filt, y, weights, cat_mapping
def _convert_x(self, x):
        # Converts non-numeric data in x to floats. x_converted is the
        # DataFrame with converted values, and cat_mapping is a dict that
        # provides the reverse mapping. For example, if 'A' was converted to 0
        # for x named 'variety', then cat_mapping['variety'][0] is 'A'.
x_converted = {}
cat_mapping = {}
# x can be either a dict or a Panel, but in Python 3, dicts don't have
# .iteritems
iteritems = getattr(x, 'iteritems', x.items)
for key, df in iteritems():
if not isinstance(df, DataFrame):
raise AssertionError("all input items must be DataFrames, "
"at least one is of "
"type {0}".format(type(df)))
if _is_numeric(df):
x_converted[key] = df
else:
try:
df = df.astype(float)
except (TypeError, ValueError):
values = df.values
distinct_values = sorted(set(values.flat))
cat_mapping[key] = dict(enumerate(distinct_values))
new_values = np.searchsorted(distinct_values, values)
x_converted[key] = DataFrame(new_values, index=df.index,
columns=df.columns)
if len(cat_mapping) == 0:
x_converted = x
return x_converted, cat_mapping
def _add_dummies(self, panel, mapping):
"""
Add entity and / or categorical dummies to input X DataFrame
Returns
-------
DataFrame
"""
panel = self._add_entity_effects(panel)
panel = self._add_categorical_dummies(panel, mapping)
return panel
def _add_entity_effects(self, panel):
"""
Add entity dummies to panel
Returns
-------
DataFrame
"""
from pandas.core.reshape import make_axis_dummies
if not self._entity_effects:
return panel
self.log('-- Adding entity fixed effect dummies')
dummies = make_axis_dummies(panel, 'minor')
if not self._use_all_dummies:
if 'entity' in self._dropped_dummies:
to_exclude = str(self._dropped_dummies.get('entity'))
else:
to_exclude = dummies.columns[0]
if to_exclude not in dummies.columns:
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log('-- Excluding dummy for entity: %s' % to_exclude)
dummies = dummies.filter(dummies.columns.difference([to_exclude]))
dummies = dummies.add_prefix('FE_')
panel = panel.join(dummies)
return panel
def _add_categorical_dummies(self, panel, cat_mappings):
"""
Add categorical dummies to panel
Returns
-------
DataFrame
"""
if not self._x_effects:
return panel
dropped_dummy = (self._entity_effects and not self._use_all_dummies)
for effect in self._x_effects:
self.log('-- Adding fixed effect dummies for %s' % effect)
dummies = get_dummies(panel[effect])
val_map = cat_mappings.get(effect)
if val_map:
val_map = dict((v, k) for k, v in compat.iteritems(val_map))
if dropped_dummy or not self._use_all_dummies:
if effect in self._dropped_dummies:
to_exclude = mapped_name = self._dropped_dummies.get(
effect)
if val_map:
mapped_name = val_map[to_exclude]
else:
to_exclude = mapped_name = dummies.columns[0]
if mapped_name not in dummies.columns: # pragma: no cover
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log(
'-- Excluding dummy for %s: %s' % (effect, to_exclude))
dummies = dummies.filter(dummies.columns.difference([mapped_name]))
dropped_dummy = True
dummies = _convertDummies(dummies, cat_mappings.get(effect))
dummies = dummies.add_prefix('%s_' % effect)
panel = panel.join(dummies)
return panel
@property
def _use_all_dummies(self):
"""
In the case of using an intercept or including time fixed
effects, completely partitioning the sample would make the X
not full rank.
"""
return (not self._intercept and not self._time_effects)
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
beta, _, _, _ = np.linalg.lstsq(X, Y)
return beta
@cache_readonly
def beta(self):
return Series(self._beta_raw, index=self._x.columns)
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self._df_raw - 1
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self._nobs - self._df_raw
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = math.rank(self._x_trans.values)
if self._time_effects:
df += self._total_times
return df
@cache_readonly
def _r2_raw(self):
Y = self._y_trans.values.squeeze()
X = self._x_trans.values
resid = Y - np.dot(X, self._beta_raw)
SSE = (resid ** 2).sum()
if self._use_centered_tss:
SST = ((Y - np.mean(Y)) ** 2).sum()
else:
SST = (Y ** 2).sum()
return 1 - SSE / SST
@property
def _use_centered_tss(self):
# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
return self._intercept or self._entity_effects or self._time_effects
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
nobs = self._nobs
factors = (nobs - 1) / (nobs - self._df_raw)
return 1 - (1 - self._r2_raw) * factors
@cache_readonly
def _resid_raw(self):
Y = self._y.values.squeeze()
X = self._x.values
return Y - np.dot(X, self._beta_raw)
@cache_readonly
def resid(self):
return self._unstack_vector(self._resid_raw)
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
# X = self._x.values
# Y = self._y.values.squeeze()
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
resid = Y - np.dot(X, self._beta_raw)
ss = (resid ** 2).sum()
return np.sqrt(ss / (self._nobs - self._df_raw))
@cache_readonly
def _var_beta_raw(self):
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
x = self._x
y = self._y
if self._time_effects:
xx = _xx_time_effects(x, y)
else:
xx = np.dot(x.values.T, x.values)
return _var_beta_panel(y, x, self._beta_raw, xx,
self._rmse_raw, cluster_axis, self._nw_lags,
self._nobs, self._df_raw, self._nw_overlap)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
return np.dot(self._x.values, self._beta_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_vector(self._y_fitted_raw, index=self._x.index)
def _unstack_vector(self, vec, index=None):
if index is None:
index = self._y_trans.index
panel = DataFrame(vec, index=index, columns=['dummy'])
return panel.to_panel()['dummy']
def _unstack_y(self, vec):
unstacked = self._unstack_vector(vec)
return unstacked.reindex(self.beta.index)
@cache_readonly
def _time_obs_count(self):
return self._y_trans.count(level=0).values
@cache_readonly
def _time_has_obs(self):
return self._time_obs_count > 0
@property
def _nobs(self):
return len(self._y)
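# Illustrative usage sketch (hypothetical data, not part of the module).  The
# public entry point is pandas.stats.interface.ols, which dispatches to
# PanelOLS when given panel-shaped inputs:
#
#     import numpy as np
#     from pandas import DataFrame, date_range
#     from pandas.stats.interface import ols
#
#     dates = date_range('2000-01-01', periods=20)
#     y = DataFrame(np.random.randn(20, 3), index=dates, columns=list('ABC'))
#     x = {'x1': DataFrame(np.random.randn(20, 3), index=dates,
#                          columns=list('ABC'))}
#     model = ols(y=y, x=x)   # returns a PanelOLS result; model.beta holds the fit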
def _convertDummies(dummies, mapping):
# cleans up the names of the generated dummies
new_items = []
for item in dummies.columns:
if not mapping:
var = str(item)
if isinstance(item, float):
var = '%g' % item
new_items.append(var)
else:
# renames the dummies if a conversion dict is provided
new_items.append(mapping[int(item)])
dummies = DataFrame(dummies.values, index=dummies.index,
columns=new_items)
return dummies
def _is_numeric(df):
for col in df:
if df[col].dtype.name == 'object':
return False
return True
def add_intercept(panel, name='intercept'):
"""
Add column of ones to input panel
Parameters
----------
panel: Panel / DataFrame
    name: string, default 'intercept'
Returns
-------
New object (same type as input)
"""
panel = panel.copy()
panel[name] = 1.
return panel.consolidate()
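# Illustrative example of add_intercept (hypothetical frame, not part of the
# module):
#
#     from pandas import DataFrame
#     df = DataFrame({'x1': [0.5, -1.2, 0.3]})
#     add_intercept(df)   # returns a copy with an extra 'intercept' column of ones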
class MovingPanelOLS(MovingOLS, PanelOLS):
"""Implements rolling/expanding panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None,
window_type='expanding', window=None,
min_periods=None,
min_obs=None,
intercept=True,
nw_lags=None, nw_overlap=False,
entity_effects=False,
time_effects=False,
x_effects=None,
cluster=None,
dropped_dummies=None,
verbose=False):
self._args = dict(intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap,
entity_effects=entity_effects,
time_effects=time_effects,
x_effects=x_effects,
cluster=cluster,
dropped_dummies=dropped_dummies,
verbose=verbose)
PanelOLS.__init__(self, y=y, x=x, weights=weights,
**self._args)
self._set_window(window_type, window, min_periods)
if min_obs is None:
min_obs = len(self._x.columns) + 1
self._min_obs = min_obs
@cache_readonly
def resid(self):
return self._unstack_y(self._resid_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_y(self._y_fitted_raw)
@cache_readonly
def y_predict(self):
"""Returns the predicted y values."""
return self._unstack_y(self._y_predict_raw)
def lagged_y_predict(self, lag=1):
"""
Compute forecast Y value lagging coefficient by input number
of time periods
Parameters
----------
lag : int
Returns
-------
DataFrame
"""
x = self._x.values
betas = self._beta_matrix(lag=lag)
return self._unstack_y((betas * x).sum(1))
@cache_readonly
def _rolling_ols_call(self):
return self._calc_betas(self._x_trans, self._y_trans)
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = self._rolling_rank()
if self._time_effects:
df += self._window_time_obs
return df[self._valid_indices]
@cache_readonly
def _var_beta_raw(self):
"""Returns the raw covariance of beta."""
x = self._x
y = self._y
dates = x.index.levels[0]
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
nobs = self._nobs
rmse = self._rmse_raw
beta = self._beta_raw
df = self._df_raw
window = self._window
if not self._time_effects:
# Non-transformed X
cum_xx = self._cum_xx(x)
results = []
for n, i in enumerate(self._valid_indices):
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
date = dates[i]
x_slice = x.truncate(prior_date, date)
y_slice = y.truncate(prior_date, date)
if self._time_effects:
xx = _xx_time_effects(x_slice, y_slice)
else:
xx = cum_xx[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
result = _var_beta_panel(y_slice, x_slice, beta[n], xx, rmse[n],
cluster_axis, self._nw_lags,
nobs[n], df[n], self._nw_overlap)
results.append(result)
return np.array(results)
@cache_readonly
def _resid_raw(self):
beta_matrix = self._beta_matrix(lag=0)
Y = self._y.values.squeeze()
X = self._x.values
resid = Y - (X * beta_matrix).sum(1)
return resid
@cache_readonly
def _y_fitted_raw(self):
x = self._x.values
betas = self._beta_matrix(lag=0)
return (betas * x).sum(1)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
x = self._x.values
betas = self._beta_matrix(lag=1)
return (betas * x).sum(1)
def _beta_matrix(self, lag=0):
if lag < 0:
raise AssertionError("'lag' must be greater than or equal to 0, "
"input was {0}".format(lag))
index = self._y_trans.index
major_labels = index.labels[0]
labels = major_labels - lag
indexer = self._valid_indices.searchsorted(labels, side='left')
beta_matrix = self._beta_raw[indexer]
beta_matrix[labels < self._valid_indices[0]] = np.NaN
return beta_matrix
@cache_readonly
def _enough_obs(self):
# XXX: what's the best way to determine where to start?
# TODO: write unit tests for this
rank_threshold = len(self._x.columns) + 1
if self._min_obs < rank_threshold: # pragma: no cover
warnings.warn('min_obs is smaller than rank of X matrix')
enough_observations = self._nobs_raw >= self._min_obs
enough_time_periods = self._window_time_obs >= self._min_periods
return enough_time_periods & enough_observations
def create_ols_dict(attr):
def attr_getter(self):
d = {}
for k, v in compat.iteritems(self.results):
result = getattr(v, attr)
d[k] = result
return d
return attr_getter
def create_ols_attr(attr):
return property(create_ols_dict(attr))
class NonPooledPanelOLS(object):
"""Implements non-pooled panel OLS.
Parameters
----------
y : DataFrame
x : Series, DataFrame, or dict of Series
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
window_type : {'full_sample', 'rolling', 'expanding'}
'full_sample' by default
window : int
size of window (for rolling/expanding OLS)
"""
ATTRIBUTES = [
'beta',
'df',
'df_model',
'df_resid',
'f_stat',
'p_value',
'r2',
'r2_adj',
'resid',
'rmse',
'std_err',
'summary_as_matrix',
't_stat',
'var_beta',
'x',
'y',
'y_fitted',
'y_predict'
]
def __init__(self, y, x, window_type='full_sample', window=None,
min_periods=None, intercept=True, nw_lags=None,
nw_overlap=False):
for attr in self.ATTRIBUTES:
setattr(self.__class__, attr, create_ols_attr(attr))
results = {}
for entity in y:
entity_y = y[entity]
entity_x = {}
for x_var in x:
entity_x[x_var] = x[x_var][entity]
from pandas.stats.interface import ols
results[entity] = ols(y=entity_y,
x=entity_x,
window_type=window_type,
window=window,
min_periods=min_periods,
intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap)
self.results = results
def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis,
nw_lags, nobs, df, nw_overlap):
xx_inv = math.inv(xx)
yv = y.values
if cluster_axis is None:
if nw_lags is None:
return xx_inv * (rmse ** 2)
else:
resid = yv - np.dot(x.values, beta)
m = (x.values.T * resid).T
xeps = math.newey_west(m, nw_lags, nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xeps, xx_inv))
else:
Xb = np.dot(x.values, beta).reshape((len(x.values), 1))
resid = DataFrame(yv[:, None] - Xb, index=y.index, columns=['resid'])
if cluster_axis == 1:
x = x.swaplevel(0, 1).sortlevel(0)
resid = resid.swaplevel(0, 1).sortlevel(0)
m = _group_agg(x.values * resid.values, x.index._bounds,
lambda x: np.sum(x, axis=0))
if nw_lags is None:
nw_lags = 0
xox = 0
for i in range(len(x.index.levels[0])):
xox += math.newey_west(m[i: i + 1], nw_lags,
nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xox, xx_inv))
def _group_agg(values, bounds, f):
"""
R-style aggregator
Parameters
----------
values : N-length or N x K ndarray
bounds : B-length ndarray
f : ndarray aggregation function
Returns
-------
ndarray with same length as bounds array
"""
if values.ndim == 1:
N = len(values)
result = np.empty(len(bounds), dtype=float)
elif values.ndim == 2:
N, K = values.shape
result = np.empty((len(bounds), K), dtype=float)
testagg = f(values[:min(1, len(values))])
if isinstance(testagg, np.ndarray) and testagg.ndim == 2:
raise AssertionError('Function must reduce')
for i, left_bound in enumerate(bounds):
if i == len(bounds) - 1:
right_bound = N
else:
right_bound = bounds[i + 1]
result[i] = f(values[left_bound:right_bound])
return result
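# Worked example of _group_agg (illustrative, not part of the module):
#
#     vals = np.arange(6.0).reshape(6, 1)
#     bounds = np.array([0, 2, 4])   # groups are rows [0:2], [2:4], [4:6]
#     _group_agg(vals, bounds, lambda x: np.sum(x, axis=0))
#     # -> array([[ 1.], [ 5.], [ 9.]])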
def _xx_time_effects(x, y):
"""
Returns X'X - (X'T) (T'T)^-1 (T'X)
"""
# X'X
xx = np.dot(x.values.T, x.values)
xt = x.sum(level=0).values
count = y.unstack().count(1).values
selector = count > 0
# X'X - (T'T)^-1 (T'X)
xt = xt[selector]
count = count[selector]
return xx - np.dot(xt.T / count, xt)
| apache-2.0 |
louispotok/pandas | pandas/tests/series/indexing/test_datetime.py | 3 | 20441 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame,
date_range, Timestamp, DatetimeIndex, NaT)
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal,
assert_frame_equal, assert_almost_equal)
import pandas.util.testing as tm
import pandas._libs.index as _index
from pandas._libs import tslib
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def test_fancy_getitem():
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s['1/2/2009'] == 48
assert s['2009-1-2'] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[Timestamp(datetime(2009, 1, 2))] == 48
pytest.raises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem():
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s['1/2/2009'] = -2
assert s[48] == -2
s['1/2/2009':'2009-06-05'] = -3
assert (s[48:54] == -3).all()
def test_dti_snap():
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
assert (res == exp).all()
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
assert (res == exp).all()
def test_dti_reset_index_round_trip():
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype('M8[ns]')
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
assert df.index[0] == stamp
assert df.reset_index()['Date'][0] == stamp
def test_series_set_value():
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s = Series().set_value(dates[0], 1.)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# assert s2.values.dtype == 'M8[ns]'
@pytest.mark.slow
def test_slice_locs_indexerror():
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.loc[datetime(1900, 1, 1):datetime(2100, 1, 1)]
def test_slicing_datetimes():
# GH 7523
# unique
df = DataFrame(np.arange(4., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
# duplicates
df = pd.DataFrame(np.arange(5., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
def test_frame_datetime64_duplicated():
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
assert (-result).all()
tst = DataFrame({'date': dates})
result = tst.duplicated()
assert (-result).all()
def test_getitem_setitem_datetime_tz_pytz():
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil():
from dateutil.tz import tzutc
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(
x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H',
tz='America/New_York')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex():
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
# GH#18435 strings get a pass from tzawareness compat
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
lb = "1990-01-01 04:00:00-0500"
rb = "1990-01-01 07:00:00-0500"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
assert result == expected
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
with pytest.raises(TypeError):
# tznaive vs tzaware comparison is invalid
# see GH#18376, GH#18162
ts[(ts.index >= lb) & (ts.index <= rb)]
lb = pd.Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo)
rb = pd.Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex():
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_getitem_median_slice_bug():
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_datetime_indexing():
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
"""
test duplicates in time series
"""
@pytest.fixture(scope='module')
def dups():
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
return Series(np.random.randn(len(dates)), index=dates)
def test_constructor(dups):
assert isinstance(dups, Series)
assert isinstance(dups.index, DatetimeIndex)
def test_is_unique_monotonic(dups):
assert not dups.index.is_unique
def test_index_unique(dups):
uniques = dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
assert uniques.dtype == 'M8[ns]' # sanity
tm.assert_index_equal(uniques, expected)
assert dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, name='foo')
expected = expected.tz_localize('US/Eastern')
assert result.tz is not None
assert result.name == 'foo'
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [tslib.iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
for t in range(20)] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_index_dupes_contains():
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
assert d in ix
def test_duplicate_dates_indexing(dups):
ts = dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
pytest.raises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_range_slice():
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(dups):
result = dups.groupby(level=0).mean()
expected = dups.groupby(dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff():
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
assert timestamp in df.index
# it works!
df.loc[timestamp]
assert len(df.loc[[timestamp]]) > 0
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered():
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(np.random.rand(len(rng)), index=rng)
ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])
for t in ts.index:
# TODO: unused?
s = str(t) # noqa
expected = ts[t]
result = ts2[t]
assert expected == result
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result, expected)
compare(slice('2011-01-01', '2011-01-15'))
compare(slice('2010-12-30', '2011-01-15'))
compare(slice('2011-01-01', '2011-01-16'))
# partial ranges
compare(slice('2011-01-01', '2011-01-6'))
compare(slice('2011-01-06', '2011-01-8'))
compare(slice('2011-01-06', '2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result, expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
assert t.year == 2005
def test_indexing():
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)), index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
expected.name = 'A'
df = DataFrame(dict(A=ts))
result = df['2001']['A']
assert_series_equal(expected, result)
# setting
ts['2001'] = 1
expected = ts['2001']
expected.name = 'A'
df.loc['2001', 'A'] = 1
result = df['2001']['A']
assert_series_equal(expected, result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00',
freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59',
freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = [Timestamp('2013-05-31 00:00'),
Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected, ts)
# GH14826, indexing with a seconds resolution string / datetime object
df = DataFrame(np.random.rand(5, 5),
columns=['open', 'high', 'low', 'close', 'volume'],
index=date_range('2012-01-02 18:01:00',
periods=5, tz='US/Central', freq='s'))
expected = df.loc[[df.index[2]]]
# this is a single date, so will raise
pytest.raises(KeyError, df.__getitem__, '2012-01-02 18:01:02', )
pytest.raises(KeyError, df.__getitem__, df.index[2], )
"""
test NaT support
"""
def test_set_none_nan():
series = Series(date_range('1/1/2000', periods=10))
series[3] = None
assert series[3] is NaT
series[3:5] = None
assert series[4] is NaT
series[5] = np.nan
assert series[5] is NaT
series[5:7] = np.nan
assert series[6] is NaT
def test_nat_operations():
# GH 8617
s = Series([0, pd.NaT], dtype='m8[ns]')
exp = s[0]
assert s.median() == exp
assert s.min() == exp
assert s.max() == exp
@pytest.mark.parametrize('method', ["round", "floor", "ceil"])
@pytest.mark.parametrize('freq', ["s", "5s", "min", "5min", "h", "5h"])
def test_round_nat(method, freq):
# GH14940
s = Series([pd.NaT])
expected = Series(pd.NaT)
round_method = getattr(s.dt, method)
assert_series_equal(round_method(freq), expected)
| bsd-3-clause |
jseabold/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
beepee14/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never zero
or more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
aarora79/sb_study | visualize/bubble.py | 1 | 3591 | import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
import math
import os
import numpy as np
import statsmodels.api as sm # recommended import according to the docs
import matplotlib.pyplot as plt
import scipy.stats.mstats as mstats
from common import globals as glob
def get_bubble_size(num_stores):
size = 4 #anything <= 5 stores is 4 pixels in size
if num_stores > 10000:
size = 60
elif num_stores > 2000:
size = 30
elif num_stores > 1000:
size = 24
elif num_stores > 500:
size = 20
elif num_stores > 100:
size = 14
elif num_stores > 50:
size = 8
elif num_stores > 5:
size = 6
return size
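# For example, get_bubble_size(1500) falls into the "> 1000" bucket and
# returns 24, while get_bubble_size(3) matches no threshold and returns the
# default size of 4.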
def draw():
glob.log.info('about to begin visualization for bubble chart...')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.COMBINED_DATASET_CSV)
df = pd.read_csv(fname)
hover_text = []
bubble_size = []
#'IT.NET.USER.P2', 'ST.INT.ARVL'
for index, row in df.iterrows():
text = 'Country, Continent: %s,%s<br>Internet users per 100 people: %f<br>International tourist arrival: %f<br>\
Number of Starbucks stores: %d' %(row['name'], row['continent'], row['IT.NET.USER.P2'], row['ST.INT.ARVL'], row['Num.Starbucks.Stores'])
hover_text.append(text)
size = get_bubble_size(row['Num.Starbucks.Stores'])
bubble_size.append(size)
df['text'] = hover_text
df['size'] = bubble_size
traces = []
color=['rgb(93, 164, 214)', 'rgb(255, 144, 14)', 'rgb(44, 160, 101)', 'rgb(255, 65, 54)', 'rgb(0,100,0)', 'rgb(218,165,32)']
ci = 0
for continent in df['continent'].unique():
trace = go.Scatter(
x=df['ST.INT.ARVL'][df['continent'] == continent],
y=df['IT.NET.USER.P2'][df['continent'] == continent],
mode='markers',
name=continent,
text=df['text'][df['continent'] == continent],
marker=dict(
symbol='circle',
sizemode='diameter',
#sizeref=1,
size=df['size'][df['continent'] == continent],
color = color[ci],
line=dict(
width=2
),
)
)
traces.append(trace)
ci += 1
data = traces
layout = go.Layout(
title='Starbucks Stores | Tourist Arrivals Vs Internet Users',
xaxis=dict(
title='International tourist arrivals',
gridcolor='rgb(255, 255, 255)',
#range=[10, 20],
#type='log',
#range=[201000,83767000],
range=[101000,93767000],
zerolinewidth=1,
ticklen=5,
gridwidth=2,
),
yaxis=dict(
title='Internet users',
gridcolor='rgb(255, 255, 255)',
range=[0,100],
zerolinewidth=1,
ticklen=5,
gridwidth=2,
),
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)',
#autosize=False,
#width=500,
#height=500,
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='starbucks_stores')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.VIS_DIR, glob.STARBUCKS_BUBBLE_CHART)
py.image.save_as(fig, filename=fname)
if __name__ == "__main__":
glob.log.info('drawing bubble chart...')
# execute only if run as a script
    draw() | mit |
voxlol/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
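# grid_to_graph builds a sparse adjacency matrix linking each pixel to its
# neighbours in the 2-D grid, so the agglomerative clustering below can only
# merge spatially contiguous regions.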
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
kirillzhuravlev/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
gsmaxwell/phase_offset_rx | gr-digital/examples/snr_estimators.py | 14 | 5302 | #!/usr/bin/env python
import sys
try:
import scipy
from scipy import stats
except ImportError:
print "Error: Program requires scipy (www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires Matplotlib (matplotlib.sourceforge.net)."
sys.exit(1)
from gnuradio import gr, digital
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal and compare the two sets of estimates.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
def online_skewness(data, alpha):
n = 0
mean = 0
M2 = 0
M3 = 0
d_M3 = 0
for n in xrange(len(data)):
delta = data[n] - mean
delta_n = delta / (n+1)
term1 = delta * delta_n * (n)
mean = mean + delta_n
M3 = term1 * delta_n * (n - 1) - 3 * delta_n * M2
M2 = M2 + term1
d_M3 = (0.001)*M3 + (1-0.001)*d_M3;
return d_M3
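# Reference sketch, not part of the original script: the batch form of the
# un-normalized third central moment that online higher-order-statistics
# algorithms (see the link above) approximate incrementally.  The helper name
# is an illustrative assumption and nothing below calls it.
def batch_third_central_moment(data):
    mu = scipy.mean(data)
    return sum((x - mu) ** 3 for x in data)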
def snr_est_simple(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.real(scipy.mean(signal**2))
y3 = (y1*y1 - y2)
snr_rat = y1*y1/y3
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.mean(scipy.real(signal**2))
y3 = (y1*y1 - y2)
y4 = online_skewness(abs(signal.real), 0.001)
skw = y4*y4 / (y2*y2*y2);
snr_rat = y1*y1 / (y3 + skw*y1*y1)
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
M2 = scipy.mean(abs(signal)**2)
M4 = scipy.mean(abs(signal)**4)
snr_rat = 2*scipy.sqrt(2*M2*M2 - M4) / (M2 - scipy.sqrt(2*M2*M2 - M4))
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
N = len(signal)
ssum = 0
msum = 0
for i in xrange(1, N):
ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
msum += (abs(signal[i])**4)
savg = (1.0/(float(N)-1.0))*ssum
mavg = (1.0/(float(N)-1.0))*msum
beta = savg / (mavg - savg)
snr_rat = 2*((beta - 1) + scipy.sqrt(beta*(beta-1)))
return 10.0*scipy.log10(snr_rat), snr_rat
def main():
gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
"skew": digital.SNR_EST_SKEW,
"m2m4": digital.SNR_EST_M2M4,
"svr": digital.SNR_EST_SVR}
py_estimators = {"simple": snr_est_simple,
"skew": snr_est_skew,
"m2m4": snr_est_m2m4,
"svr": snr_est_svr}
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Set the number of samples to process [default=%default]")
parser.add_option("", "--snr-min", type="float", default=-5,
help="Minimum SNR [default=%default]")
parser.add_option("", "--snr-max", type="float", default=20,
help="Maximum SNR [default=%default]")
parser.add_option("", "--snr-step", type="float", default=0.5,
help="SNR step amount [default=%default]")
parser.add_option("-t", "--type", type="choice",
choices=gr_estimators.keys(), default="simple",
help="Estimator type {0} [default=%default]".format(
gr_estimators.keys()))
(options, args) = parser.parse_args ()
N = options.nsamples
xx = scipy.random.randn(N)
xy = scipy.random.randn(N)
bits = 2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
snr_known = list()
snr_python = list()
snr_gr = list()
# when to issue an SNR tag; can be ignored in this example.
ntag = 10000
n_cpx = xx + 1j*xy
py_est = py_estimators[options.type]
gr_est = gr_estimators[options.type]
SNR_min = options.snr_min
SNR_max = options.snr_max
SNR_step = options.snr_step
SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
for snr in SNR_dB:
SNR = 10.0**(snr/10.0)
scale = scipy.sqrt(SNR)
yy = bits + n_cpx/scale
print "SNR: ", snr
Sknown = scipy.mean(yy**2)
Nknown = scipy.var(n_cpx/scale)/2
snr0 = Sknown/Nknown
snr0dB = 10.0*scipy.log10(snr0)
snr_known.append(snr0dB)
snrdB, snr = py_est(yy)
snr_python.append(snrdB)
gr_src = gr.vector_source_c(bits.tolist(), False)
gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
gr_chn = gr.channel_model(1.0/scale)
gr_snk = gr.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
tb.run()
snr_gr.append(gr_snr.snr())
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
s1.grid(True)
s1.set_title('SNR Estimators')
s1.set_xlabel('SNR (dB)')
s1.set_ylabel('Estimated SNR')
s1.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
NeurotechBerkeley/bci-course | lab5/lsl-record.py | 5 | 2841 | #!/usr/bin/env python
## code by Alexandre Barachant
import numpy as np
import pandas as pd
from time import time, strftime, gmtime
from optparse import OptionParser
from pylsl import StreamInlet, resolve_byprop
from sklearn.linear_model import LinearRegression
default_fname = ("data/data_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime()))
parser = OptionParser()
parser.add_option("-d", "--duration",
dest="duration", type='int', default=300,
help="duration of the recording in seconds.")
parser.add_option("-f", "--filename",
dest="filename", type='str', default=default_fname,
help="Name of the recording file.")
# dejitter timestamps
dejitter = False
(options, args) = parser.parse_args()
print("looking for an EEG stream...")
streams = resolve_byprop('type', 'EEG', timeout=2)
if len(streams) == 0:
    raise RuntimeError("Can't find EEG stream")
print("Start acquiring data")
inlet = StreamInlet(streams[0], max_chunklen=12)
eeg_time_correction = inlet.time_correction()
print("looking for a Markers stream...")
marker_streams = resolve_byprop('type', 'Markers', timeout=2)
if marker_streams:
inlet_marker = StreamInlet(marker_streams[0])
marker_time_correction = inlet_marker.time_correction()
else:
inlet_marker = False
print("Cant find Markers stream")
info = inlet.info()
description = info.desc()
freq = info.nominal_srate()
Nchan = info.channel_count()
ch = description.child('channels').first_child()
ch_names = [ch.child_value('label')]
for i in range(1, Nchan):
ch = ch.next_sibling()
ch_names.append(ch.child_value('label'))
res = []
timestamps = []
markers = []
t_init = time()
print('Start recording at time t=%.3f' % t_init)
while (time() - t_init) < options.duration:
try:
data, timestamp = inlet.pull_chunk(timeout=1.0,
max_samples=12)
if timestamp:
res.append(data)
timestamps.extend(timestamp)
if inlet_marker:
marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
if timestamp:
markers.append([marker, timestamp])
except KeyboardInterrupt:
break
res = np.concatenate(res, axis=0)
timestamps = np.array(timestamps)
if dejitter:
y = timestamps
X = np.atleast_2d(np.arange(0, len(y))).T
lr = LinearRegression()
lr.fit(X, y)
timestamps = lr.predict(X)
res = np.c_[timestamps, res]
data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names)
data['Marker'] = 0
# process markers:
for marker in markers:
    # find the index of the EEG sample closest to each marker timestamp
ix = np.argmin(np.abs(marker[1] - timestamps))
val = timestamps[ix]
data.loc[ix, 'Marker'] = marker[0][0]
data.to_csv(options.filename, float_format='%.3f', index=False)
print('Done !')
| mit |
ChanderG/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
            # gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
UNR-AERIAL/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
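# "Vertical averaging": each fold's TPR curve is interpolated onto the common
# FPR grid mean_fpr, accumulated into mean_tpr, and divided by the number of
# folds after the loop to obtain the mean ROC curve.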
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
jviada/QuantEcon.py | examples/eigenvec.py | 7 | 1239 | """
Filename: eigenvec.py
Authors: Tom Sargent and John Stachurski.
Illustrates eigenvectors.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import eig
A = ((1, 2),
(2, 1))
A = np.array(A)
evals, evecs = eig(A)
evecs = evecs[:, 0], evecs[:, 1]
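# For this symmetric matrix the eigenvalues are 3 and -1, with eigenvectors
# along [1, 1] and [1, -1]; the red arrows (A v) drawn below therefore lie on
# the same lines as the blue eigenvector arrows, scaled by the eigenvalues.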
fig, ax = plt.subplots()
# Set the axes through the origin
for spine in ['left', 'bottom']:
ax.spines[spine].set_position('zero')
for spine in ['right', 'top']:
ax.spines[spine].set_color('none')
ax.grid(alpha=0.4)
xmin, xmax = -3, 3
ymin, ymax = -3, 3
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# ax.set_xticks(())
# ax.set_yticks(())
# Plot each eigenvector
for v in evecs:
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='blue',
shrink=0,
alpha=0.6,
width=0.5))
# Plot the image of each eigenvector
for v in evecs:
v = np.dot(A, v)
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='red',
shrink=0,
alpha=0.6,
width=0.5))
# Plot the lines they run through
x = np.linspace(xmin, xmax, 3)
for v in evecs:
a = v[1] / v[0]
ax.plot(x, a * x, 'b-', lw=0.4)
plt.show()
| bsd-3-clause |
costypetrisor/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
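# The interval is built from two quantile fits: alpha=0.95 for the upper bound
# and 1 - alpha = 0.05 for the lower bound, giving a central 90% prediction
# interval; a third fit with loss='ls' provides the central prediction.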
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
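# With fit_inverse_transform=True, KernelPCA also learns an approximate inverse
# mapping, so X_back contains approximate pre-images of the projected points
# rather than an exact reconstruction.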
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 20 | 22194 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
from sklearn.utils.testing import assert_raise_message
np.seterr(all='warn')
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
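    # (alpha is the L2 penalty strength, so larger values shrink the weight norms.)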
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
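    # Each analytic partial derivative is compared against the central-difference
    # estimate (J(theta + eps * e_i) - J(theta - eps * e_i)) / (2 * eps).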
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
solver='lbfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problems.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
if activation == 'identity':
assert_greater(mlp.score(X, y), 0.84)
else:
# Non linear models perform much better than linear bottleneck:
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
# Tests that warm_start reuse past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
# Non regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(solver='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(momentum=2).fit, X, y)
assert_raises(ValueError, clf(momentum=-0.5).fit, X, y)
assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y)
assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y)
assert_raises(ValueError, clf(validation_fraction=1).fit, X, y)
assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_1=1).fit, X, y)
assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_2=1).fit, X, y)
assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y)
assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y)
assert_raises(ValueError, clf(solver='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
learning_rate='adaptive')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
@ignore_warnings(category=RuntimeWarning)
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
message = ('warm_start can only be used where `y` has the same '
'classes as in the previous call to fit.'
' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
assert_raise_message(ValueError, message, clf.fit, X, y_i)
| bsd-3-clause |
tsob/cnn-music-structure | src/generate_data.py | 1 | 12431 | #!/usr/bin/env python
"""
@name: generate_data.py
@desc: generate some data
@auth: Tim O'Brien
@date: Feb. 18th, 2016
"""
import argparse
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import librosa
import util.evaluation as ev
import util.datapaths as dpath
# Debug?
DEBUG_PLOT = False
# Set some params
FS = 44100 # Enforce 44.1 kHz sample rate
N_FFT = 2048
HOP_LENGTH = N_FFT/2 # 50% overlap
N_MFCC = 13
N_MEL = 128
DB_LOW = -250.0 # silence in dB
T_CONTEXT = 3 # seconds of context for our features
N_FRAME_CONTEXT = librosa.time_to_frames(
T_CONTEXT,
sr=FS, hop_length=HOP_LENGTH, n_fft=N_FFT
)[0]+1
# 64 frames on either side, for context
BOUNDARY_KERNEL = signal.gaussian(N_FRAME_CONTEXT, std=32) # For smoothing our y
#BOUNDARY_KERNEL = np.ones(N_FRAME_CONTEXT)
DTYPE = 'float32'
# FOR USE ON AMAZON EC2 AFTER COPYING FROM S3
#DATADIR = os.path.abspath(os.path.join('/mnt','audio'))
#SALAMIDIR = os.path.abspath(os.path.join('/mnt','salami', 'salami-data-public'))
# FOR USE ON WINDOWS MACHINE
DATADIR = os.path.abspath('F:\salami-audio')
SALAMIDIR = os.path.abspath('F:\salami-data-public')
OUTPUTDIR = os.path.abspath('F:\CNNM-output-data')
# FOR USE ON CCRMA NETWORK
#DATADIR = os.path.abspath('/user/t/tsob/Documents/cs231n/proj/data')
#SALAMIDIR = os.path.abspath('/usr/ccrma/media/databases/mir_datasets/salami/salami-data-public')
#OUTPUTDIR = os.path.abspath('/zap/tsob/audio')
# My local machine
#DATADIR = os.path.abspath('/home/tim/Projects/convnet-music-structure/salami-audio')
#SALAMIDIR = os.path.abspath('/home/tim/Projects/convnet-music-structure/salami-data-public')
#OUTPUTDIR = os.path.abspath('/home/tim/Projects/convnet-music-structure/src/zap/')
# One big list of the valid SALAMI ids
SIDS = [1258, 1522, 1491, 1391, 986, 1392, 1562, 1006, 1303, 1514, 982, 1095,
1130, 1216, 1204, 1536, 1492, 1230, 1503, 1096, 1220, 996, 976, 1010,
1120, 1064, 1292, 1008, 1431, 1206, 1074, 1356, 1406, 1559, 1566, 1112,
1278, 1540, 1423, 1170, 1372, 1014, 1496, 1327, 1439, 1528, 1311, 1226,
1138, 1016, 1364, 1484, 1338, 1254, 968, 998, 1302, 1075, 1018, 1166,
1239, 1080, 1032, 1447, 984, 1382, 1284, 1043, 1378, 1467, 1038, 1499,
1059, 1534, 1283, 1352, 1524, 1428, 1502, 1088, 1236, 1543, 1475, 1551,
990, 1589, 1282, 1459, 1379, 1542, 1131, 1460, 1050, 1128, 991, 1560,
1139, 1527, 1270, 1450, 1348, 1331, 1091, 1060, 1015, 1501, 1023, 1200,
1340, 1579, 1287, 1062, 1251, 1424, 1516, 1448, 1597, 1575, 1376, 1511,
1164, 1548, 1555, 1594, 1224, 1470, 1068, 1007, 1104, 1343, 1234, 1152,
1108, 1079, 1212, 972, 1190, 1271, 1136, 1300, 1003, 1103, 1434, 958,
1082, 1046, 1326, 1518, 999, 1388, 1472, 1507, 1036, 1316, 1274, 1198,
1083, 1435, 1387, 1587, 1572, 1290, 1565, 1504, 1127, 1146, 1462, 1268,
1094, 1520, 1366, 1347, 1483, 1517, 1319, 1092, 1498, 971, 1044, 1034,
1223, 1346, 1532, 1494, 1123, 1299, 1370, 1298, 1155, 1574, 1240, 1235,
1264, 1183, 1211, 1586, 1106, 1275, 1027, 1488, 1360, 1490, 1076, 1306,
1580, 1259, 1592, 1280, 1547, 1114, 1119, 1322, 1446, 1359, 1058, 1011,
1443, 1307, 1098, 1351, 1598, 1180, 1419, 1508, 995, 1550, 1051, 1194,
1215, 1247, 1395, 1159, 1531, 1432, 1396, 1276, 1055, 1334, 1272, 1374,
1355, 1390, 1022, 1571, 967, 1557, 1286, 1228, 975, 1024, 1314, 1158,
988, 1039, 1182, 955, 1564, 1279, 1544, 1332, 1294, 1308, 1515, 962,
1420, 1596, 1163, 1047, 1584, 1026, 1436, 1455, 1476, 1403, 1072, 1330,
1244, 1000, 1510, 1573, 994, 1028, 1549, 1179, 1162, 1552, 1238, 1371,
1438, 992, 1124, 1367, 1111, 1590, 980, 1242, 1567, 1556, 1054, 1354,
1539, 1116, 1148, 1004, 1533, 1232, 1339, 1324, 1291, 978, 1048, 1263,
1582, 1315, 1176, 1248, 1509, 1219, 1407, 1400, 1243, 1172, 1442, 1218,
1363, 1090, 1067, 1202, 1523, 1187, 1150, 1195, 956, 1452, 1186, 1563,
1312, 1519, 1427, 1042, 1412, 1595, 1323, 1184, 1086, 1554, 1546, 1246,
1260, 1479, 1099, 1318, 1368, 1558, 1122, 1384, 1525, 974, 1478, 1118,
1588, 1418, 1456, 963, 1078, 1408, 1402, 1444, 1142, 983, 1404, 1250,
1464, 1526, 1207, 1304, 1174, 1019, 1151, 1576, 1358, 1375, 1336, 1192,
1362, 1102, 1474, 1288, 1296, 1386, 1066, 1056, 970, 1512, 1399, 1416,
1188, 1070, 1107, 1063, 1295, 1581, 1266, 1012, 1175, 1422, 1134, 979,
1342, 1154, 1156, 1203, 1168, 1415, 1541, 1132, 1256, 1458, 1482, 1035,
1196, 1583, 1530, 1310, 1328, 1143, 1100, 1506, 1135, 1451, 1147, 1191,
1591, 960, 1110, 1414, 1383, 964, 1335, 1231, 1210, 1535, 1394, 1262,
959, 1214, 1350, 1570, 1084, 1495, 1020, 1071, 1568, 1380, 1144, 1487,
1222, 1199, 1538, 1160, 1578, 1468]
def get_sids(datadir=DATADIR):
"""
Get the SALAMI IDS and their audio filenames, returned as lists, for a
given directory containing all the audio files.
"""
sids = [int(os.path.splitext(listitem)[0]) \
for listitem in os.listdir(datadir)]
paths = [listitem for listitem in os.listdir(datadir)]
return sids, paths
def get_data(
sids,
datadir=DATADIR,
salamidir=SALAMIDIR,
outputdir=OUTPUTDIR,
prefix=''
):
"""
Give me some data!
"""
paths = os.listdir(datadir)
train = {}
for sid in sids:
pathmask = [path.startswith(str(sid)) for path in paths]
path_idx = pathmask.index(True)
path = paths[path_idx]
X_path, X_shape, y_path, y_shape = serialize_song(
sid,
path,
datadir=datadir,
salamidir=salamidir,
outputdir=outputdir,
prefix=''
)
# Put all the output into dictionaries
train[str(sid)] = {}
train[str(sid)]['X_path'] = X_path
train[str(sid)]['y_path'] = y_path
train[str(sid)]['X_shape'] = X_shape
train[str(sid)]['y_shape'] = y_shape
# Save the dicts for later
np.save(
os.path.join(outputdir,'datadict'+prefix+'.npy'),
train
)
return train
def get_preparsed_data(datadict_path):
"""
Give me some preparsed data!
"""
train = np.load(datadict_path).tolist()
return train
def serialize_song(
sid,
path,
datadir=DATADIR,
salamidir=SALAMIDIR,
outputdir=OUTPUTDIR,
prefix='data'
):
"""
    serialize_song()
    Serializes one song's features and boundary labels to disk, given its
    SALAMI ID and the path to its audio file.
    Arguments:
    sid : the SALAMI ID (int)
    path : path to the sid's audio file (string)
datadir : where the audio files are stored
salamidir : i.e. the salami-data-public dir from a cloned SALAMI repo
outputdir : for serialized data on disk
prefix : prefix for serialized data file on disk
Outputs:
X_path : string paths to the serialized files
X_shape : shape of data serialized in X_path
y_path : string paths to the serialized files
y_shape : shape of data serialized in y_path
"""
X, y = None, None
X_path, X_shape, y_path, y_shape = None, None, None, None
X_shape = [0, 1, N_MEL, N_FRAME_CONTEXT]
y_shape = [0, 1]
print "SID: {0},\tfile: {1}".format(sid, path)
y_path = os.path.abspath(
os.path.join(outputdir, prefix + str(sid) + '_y')
)
X_path = os.path.abspath(
os.path.join(outputdir, prefix + str(sid) + '_X')
)
# Get the annotated segment times (sec)
times = ev.id2segtimes(
sid,
ann_type="uppercase",
salamipath=salamidir
)
times_frames = librosa.time_to_frames(
times,
sr=FS,
hop_length=HOP_LENGTH,
n_fft=N_FFT
)
# Get signal
sig, fs = librosa.load(
os.path.join(datadir, path),
FS
)
# Get feature frames
sig_feat = librosa.feature.melspectrogram(
y=sig,
sr=fs,
n_fft=N_FFT,
hop_length=HOP_LENGTH,
n_mels=N_MEL,
fmax=1600
)
sig_feat = 20.0*np.log10(np.clip( sig_feat, a_min=1e-12, a_max=None)) # convert to dB
sig_feat = sig_feat - np.max(sig_feat) # Normalize to 0dB
sig_feat[sig_feat==-np.inf] = DB_LOW # screen out inf
# Keep track of the number of frames for this song
n_frames = sig_feat.shape[1]
y_shape[0] = n_frames # increment the shape of our final output y data
X_shape[0] = n_frames # increment the shape of our final output y data
# Pad the frames, so we can have frames centered at the very start and
# end of the song.
sig_feat = np.hstack((
np.ones((N_MEL, N_FRAME_CONTEXT/2)) * DB_LOW,
sig_feat,
np.ones((N_MEL, N_FRAME_CONTEXT/2)) * DB_LOW
))
# Generate the boundary indicator
y = np.memmap(
y_path,
dtype=DTYPE,
mode='w+',
shape=tuple(y_shape)
)
y[:] = np.zeros((n_frames,1))[:] # start with zeros
y[np.minimum(times_frames,n_frames-1),0] = 1.0
if(DEBUG_PLOT):
plt.figure(figsize=(10, 3))
plt.plot(
y,
label="Annotations"
)
# Smooth y with the gaussian kernel
y[:,0] = np.convolve( y[:,0], BOUNDARY_KERNEL, 'same')
y[:,0] = np.minimum(y[:,0],1.0) # nothing above 1
if(DEBUG_PLOT):
plt.plot(
y,
label="Smoothed"
)
plt.xlabel("Frame number")
plt.ylabel("Segment boundary strength")
plt.legend()
# plt.colorbar()
plt.savefig('./seg.pdf', bbox_inches='tight')
# plt.show()
# Generate the training data
X = np.memmap(
X_path,
dtype=DTYPE,
mode='w+',
shape=tuple(X_shape)
)
for i_frame in xrange(n_frames):
X[i_frame,0] = sig_feat[:,i_frame:i_frame+N_FRAME_CONTEXT]
# debug plot
if(DEBUG_PLOT):
plt.figure()
plt.subplot(211)
plt.imshow(X[X.shape[0]/2,0])
plt.colorbar()
plt.subplot(212)
plt.plot(y)
plt.show()
# Flush our binary data to file
X.flush()
y.flush()
return X_path, X_shape, y_path, y_shape
if __name__ == "__main__":
P = argparse.ArgumentParser(
description='Generate some data for the CNN.'
)
P.add_argument(
'-a', '--audiodir',
help='Directory with salami audio files.',
required=False,
default=DATADIR
)
P.add_argument(
'-ds', '--salamidir',
help='Directory with salami annotation files.',
required=False,
default=SALAMIDIR
)
P.add_argument(
'-w', '--workingdir',
help='Directory for intermediate data and model files.',
required=False,
default=OUTPUTDIR
)
P.add_argument(
'-t', '--train',
help='Number of songs to include in training set.',
required=False,
default=1
)
P.add_argument(
'-v', '--val',
help='Number of songs to include in validation set.',
required=False,
default=1
)
P.add_argument(
'-s', '--test',
help='Number of songs to include in test set.',
required=False,
default=1
)
ARGS = P.parse_args()
n_train = int(ARGS.train)
n_val = int(ARGS.val)
n_test = int(ARGS.test)
n_total = n_train + n_val + n_test
n_sids = len(SIDS)
SID_SUBSET = np.random.choice(SIDS, size=n_total, replace=False)
train = get_data(
SID_SUBSET[:n_train],
datadir=ARGS.audiodir,
salamidir=ARGS.salamidir,
outputdir=ARGS.workingdir,
prefix='train')
val = get_data(
SID_SUBSET[n_train:n_train+n_val],
datadir=ARGS.audiodir,
salamidir=ARGS.salamidir,
outputdir=ARGS.workingdir,
prefix='val'
)
test = get_data(
SID_SUBSET[n_train+n_val:],
datadir=ARGS.audiodir,
salamidir=ARGS.salamidir,
outputdir=ARGS.workingdir,
prefix='test'
)
print 'TRAINING SET:'
print train
print 'VALIDATION SET:'
print val
print 'TEST SET:'
print test
| mit |
google-research/google-research | graph_embedding/dmon/train_dgi_batched.py | 1 | 5453 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TODO(tsitsulin): add headers, tests, and improve style."""
from absl import app
from absl import flags
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import normalized_mutual_info_score
import tensorflow.compat.v2 as tf
from graph_embedding.dmon.layers.gcn import GCN
from graph_embedding.dmon.models.dgi import deep_graph_infomax
from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph
from graph_embedding.dmon.synthetic_data.overlapping_gaussians import line_gaussians
from graph_embedding.dmon.utilities.batching import make_batch
from graph_embedding.dmon.utilities.batching import random_batch
from graph_embedding.dmon.utilities.shuffling import shuffle_inbatch
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0)
flags.DEFINE_integer(
'n_clusters',
2,
'Number of clusters for the synthetic graph.',
lower_bound=0)
flags.DEFINE_integer(
'batch_size', 16, 'Batch size to use for training.', lower_bound=0)
flags.DEFINE_float(
'train_size', 0.2, 'Training data proportion.', lower_bound=0)
flags.DEFINE_integer(
'n_epochs', 200, 'Number of epochs to train.', lower_bound=0)
flags.DEFINE_float(
'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
print('Bröther may i have some self-lööps')
n_nodes = FLAGS.n_nodes
n_clusters = FLAGS.n_clusters
train_size = FLAGS.train_size
batch_size = FLAGS.batch_size
data_clean, data_dirty, labels = line_gaussians(n_nodes, n_clusters)
graph_clean = construct_knn_graph(data_clean)
n_neighbors = [15, 10] # TODO(tsitsulin): move to FLAGS.
total_matrix_size = 1 + np.cumprod(n_neighbors).sum()
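  # With n_neighbors = [15, 10] this is 1 + 15 + 15 * 10 = 166: one row for the
  # root node plus rows for its sampled one- and two-hop neighbourhoods, i.e.
  # the fixed per-subgraph matrix size used by the Input layers below.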
train_mask = np.zeros(n_nodes, dtype=np.bool)
train_mask[np.random.choice(
np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True
test_mask = ~train_mask
print(
f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}'
)
print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}')
input_features = tf.keras.layers.Input(shape=(
total_matrix_size,
2,
))
input_features_corrupted = tf.keras.layers.Input(
shape=(
total_matrix_size,
2,
))
input_graph = tf.keras.layers.Input((
total_matrix_size,
total_matrix_size,
))
encoder = [GCN(64), GCN(32), tf.keras.layers.Lambda(lambda x: x[0][:, 0, :])]
model = deep_graph_infomax(
[input_features, input_features_corrupted, input_graph], encoder)
def loss(model, x, y, training):
_, y_ = model(x, training=training)
return loss_object(y_true=y, y_pred=y_)
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets, training=True)
for loss_internal in model.losses:
loss_value += loss_internal
return loss_value, tape.gradient(loss_value, model.trainable_variables)
labels_dgi = tf.concat([tf.zeros([batch_size, 1]),
tf.ones([batch_size, 1])], 0)
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(FLAGS.learning_rate)
for epoch in range(FLAGS.n_epochs):
subgraph_mat, features_mat, _, nonzero_indices = random_batch(
graph_clean, data_dirty, batch_size, n_neighbors)
perc_shuffle = 1 # np.linspace(1, 0.25, max_epoch)[epoch]
features_corrupted = shuffle_inbatch(features_mat, nonzero_indices,
perc_shuffle)
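    # Shuffling features across nodes produces the corrupted view that serves
    # as the negative sample for the Deep Graph Infomax objective.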
loss_value, grads = grad(model,
[features_mat, features_corrupted, subgraph_mat],
labels_dgi)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print(
f'epoch {epoch}, loss: {loss_value.numpy():.4f}, shuffle %: {100*perc_shuffle:.2f}'
)
subgraph_mat, features_mat, _ = make_batch(graph_clean, data_dirty,
np.arange(n_nodes), n_neighbors)
representations, _ = model([features_mat, features_mat, subgraph_mat],
training=False)
representations = representations.numpy()
clf = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf.fit(representations[train_mask], labels[train_mask])
clusters = clf.predict(representations[test_mask])
print(
'NMI:',
normalized_mutual_info_score(
labels[test_mask], clusters, average_method='arithmetic'))
print('Accuracy:', 100 * accuracy_score(labels[test_mask], clusters))
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
baliga-lab/cmonkey2 | cmonkey/tools/plot_expressions.py | 1 | 2029 | """plot_expressions.py - make cluster gene expression plots"""
import matplotlib.pyplot as plt
import numpy as np
import os
import math
from cmonkey.tools.util import read_ratios
import cmonkey.database as cm2db
from sqlalchemy import func, and_
def normalize_js(value):
if math.isnan(value) or math.isinf(value):
return 0.0
else:
return value
def generate_plots(session, result_dir, output_dir):
ratios = read_ratios(result_dir)
iteration = session.query(func.max(cm2db.RowMember.iteration))
clusters = [r[0] for r in session.query(cm2db.RowMember.cluster).distinct().filter(
cm2db.RowMember.iteration == iteration)]
figure = plt.figure(figsize=(6,3))
for cluster in clusters:
plt.clf()
plt.cla()
genes = [r.row_name.name for r in session.query(cm2db.RowMember).filter(
and_(cm2db.RowMember.cluster == cluster, cm2db.RowMember.iteration == iteration))]
cluster_conds = [c.column_name.name for c in session.query(cm2db.ColumnMember).filter(
and_(cm2db.ColumnMember.cluster == cluster, cm2db.ColumnMember.iteration == iteration))]
all_conds = [c[0] for c in session.query(cm2db.ColumnName.name).distinct()]
non_cluster_conds = [cond for cond in all_conds if not cond in set(cluster_conds)]
cluster_data = ratios.loc[genes, cluster_conds]
non_cluster_data = ratios.loc[genes, non_cluster_conds]
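        # Each gene is plotted with its in-cluster conditions first and its
        # out-of-cluster conditions after them; the dashed red line drawn below
        # marks the boundary between the two groups.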
min_value = ratios.min()
max_value = ratios.max()
for gene in genes:
values = [normalize_js(val) for val in cluster_data.loc[gene,:].values]
values += [normalize_js(val) for val in non_cluster_data.loc[gene,:].values]
plt.plot(values)
# plot the "in"/"out" separator line
cut_line = len(cluster_conds)
plt.plot([cut_line, cut_line], [min_value, max_value], color='red',
linestyle='--', linewidth=1)
plt.savefig(os.path.join(output_dir, "exp-%d" % cluster))
plt.close(figure)
| lgpl-3.0 |
otmaneJai/Zipline | tests/risk/answer_key.py | 39 | 11989 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
    # Handles multi-letter spreadsheet columns (e.g. 'AA') as well,
    # although the answer key currently only needs single letters.
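    # e.g. 'A' -> 1, 'D' -> 4, 'AA' -> 27 (1-based, like spreadsheet columns).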
index = 0
for i, char in enumerate(reversed(col_letter)):
index += ((ord(char) - 65) + 1) * pow(26, i)
return index
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
with open(ANSWER_KEY_PATH, 'rb') as f:
md5 = hashlib.md5()
buf = f.read(1024)
md5.update(buf)
while buf != b"":
buf = f.read(1024)
md5.update(buf)
return md5.hexdigest()
def ensure_latest_answer_key():
"""
    Get the latest answer key from a publicly available location.
Logic for determining what and when to download is as such:
    - If there is no local spreadsheet file, then get the latest answer key,
as defined by the last row in the checksum file.
- If there is a local spreadsheet file:
-- If the spreadsheet's checksum is in the checksum file:
    --- If the spreadsheet's checksum does not match the latest, then download
    the latest answer key and replace the local copy.
--- If the spreadsheet's checksum matches the latest, then skip download,
and use the local spreadsheet as a cached copy.
-- If the spreadsheet's checksum is not in the checksum file, then leave
the local file alone, assuming that the local xls's md5 is not in the list
due to local modifications during development.
    It is possible that md5's could collide; if that is ever the case, we should
then find an alternative naming scheme.
The spreadsheet answer sheet is not kept in SCM, as every edit would
increase the repo size by the file size, since it is treated as a binary.
"""
answer_key_dl_checksum = None
local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
if local_answer_key_exists:
local_hash = answer_key_signature()
if local_hash in ANSWER_KEY_CHECKSUMS:
# Assume previously downloaded version.
# Check for latest.
if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
# More recent checksum, download
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
else:
# Assume local copy that is being developed on
answer_key_dl_checksum = None
else:
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
if answer_key_dl_checksum:
res = requests.get(
ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
with open(ANSWER_KEY_PATH, 'wb') as f:
f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
"""
Coordinates for the spreadsheet, using the values as seen in the notebook.
The python-excel libraries use 0 index, while the spreadsheet in a GUI
uses a 1 index.
"""
def __init__(self, sheet_name, col, row_start, row_end,
value_type='float'):
self.sheet_name = sheet_name
self.col = col
self.row_start = row_start
self.row_end = row_end
self.value_type = value_type
@property
def col_index(self):
return col_letter_to_index(self.col) - 1
@property
def row_start_index(self):
return self.row_start - 1
@property
def row_end_index(self):
return self.row_end - 1
def __str__(self):
return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format(
sheet_name=self.sheet_name,
col=self.col,
row_start=self.row_start,
row_end=self.row_end
)
class AnswerKey(object):
INDEXES = {
'RETURNS': DataIndex('Sim Period', 'D', 4, 255),
'BENCHMARK': {
'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
'Returns': DataIndex('s_p', 'H', 4, 254)
},
# Below matches the inconsistent capitalization in spreadsheet
'BENCHMARK_PERIOD_RETURNS': {
'Monthly': DataIndex('s_p', 'R', 8, 19),
'3-Month': DataIndex('s_p', 'S', 10, 19),
'6-month': DataIndex('s_p', 'T', 13, 19),
'year': DataIndex('s_p', 'U', 19, 19),
},
'BENCHMARK_PERIOD_VOLATILITY': {
'Monthly': DataIndex('s_p', 'V', 8, 19),
'3-Month': DataIndex('s_p', 'W', 10, 19),
'6-month': DataIndex('s_p', 'X', 13, 19),
'year': DataIndex('s_p', 'Y', 19, 19),
},
'ALGORITHM_PERIOD_RETURNS': {
'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
'3-Month': DataIndex('Sim Period', 'AA', 25, 34),
'6-month': DataIndex('Sim Period', 'AB', 28, 34),
'year': DataIndex('Sim Period', 'AC', 34, 34),
},
'ALGORITHM_PERIOD_VOLATILITY': {
'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
'3-Month': DataIndex('Sim Period', 'AI', 25, 34),
'6-month': DataIndex('Sim Period', 'AJ', 28, 34),
'year': DataIndex('Sim Period', 'AK', 34, 34),
},
'ALGORITHM_PERIOD_SHARPE': {
'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
'3-Month': DataIndex('Sim Period', 'AM', 25, 34),
'6-month': DataIndex('Sim Period', 'AN', 28, 34),
'year': DataIndex('Sim Period', 'AO', 34, 34),
},
'ALGORITHM_PERIOD_BETA': {
'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
'3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
'6-month': DataIndex('Sim Period', 'AR', 28, 34),
'year': DataIndex('Sim Period', 'AS', 34, 34),
},
'ALGORITHM_PERIOD_ALPHA': {
'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
'3-Month': DataIndex('Sim Period', 'AU', 25, 34),
'6-month': DataIndex('Sim Period', 'AV', 28, 34),
'year': DataIndex('Sim Period', 'AW', 34, 34),
},
'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
'3-Month': DataIndex('Sim Period', 'BK', 25, 34),
'6-month': DataIndex('Sim Period', 'BL', 28, 34),
'year': DataIndex('Sim Period', 'BM', 34, 34),
},
'ALGORITHM_PERIOD_COVARIANCE': {
'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
'3-Month': DataIndex('Sim Period', 'BG', 25, 34),
'6-month': DataIndex('Sim Period', 'BH', 28, 34),
'year': DataIndex('Sim Period', 'BI', 34, 34),
},
'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
'3-Month': DataIndex('Sim Period', 'BO', 25, 34),
'6-month': DataIndex('Sim Period', 'BP', 28, 34),
'year': DataIndex('Sim Period', 'BQ', 34, 34),
},
'ALGORITHM_PERIOD_SORTINO': {
'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
'3-Month': DataIndex('Sim Period', 'BS', 25, 34),
'6-month': DataIndex('Sim Period', 'BT', 28, 34),
'year': DataIndex('Sim Period', 'BU', 34, 34),
},
'ALGORITHM_RETURN_VALUES': DataIndex(
'Sim Cumulative', 'D', 4, 254),
'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
'Sim Cumulative', 'P', 4, 254),
'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
'Sim Cumulative', 'R', 4, 254),
'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
'Sim Cumulative', 'U', 4, 254),
'CUMULATIVE_SORTINO': DataIndex(
'Sim Cumulative', 'V', 4, 254),
'CUMULATIVE_INFORMATION': DataIndex(
'Sim Cumulative', 'AA', 4, 254),
'CUMULATIVE_BETA': DataIndex(
'Sim Cumulative', 'AD', 4, 254),
'CUMULATIVE_ALPHA': DataIndex(
'Sim Cumulative', 'AE', 4, 254),
'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
'Sim Cumulative', 'AH', 4, 254),
}
def __init__(self):
self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)
self.sheets = {}
self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
'Sim Cumulative')
self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')
for name, index in self.INDEXES.items():
if isinstance(index, dict):
subvalues = {}
for subkey, subindex in index.items():
subvalues[subkey] = self.get_values(subindex)
setattr(self, name, subvalues)
else:
setattr(self, name, self.get_values(index))
def parse_date_value(self, value):
return xlrd.xldate_as_tuple(value, 0)
def parse_float_value(self, value):
return value if value != '' else np.nan
def get_raw_values(self, data_index):
return self.sheets[data_index.sheet_name].col_values(
data_index.col_index,
data_index.row_start_index,
data_index.row_end_index + 1)
@property
def value_type_to_value_func(self):
return {
'float': self.parse_float_value,
'date': self.parse_date_value,
}
def get_values(self, data_index):
value_parser = self.value_type_to_value_func[data_index.value_type]
return [value for value in
map(value_parser, self.get_raw_values(data_index))]
ANSWER_KEY = AnswerKey()
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
'Algorithm Returns': ALGORITHM_RETURNS})
RISK_CUMULATIVE = pd.DataFrame({
'volatility': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
'sharpe': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
'downside_risk': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
'sortino': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
'information': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
'alpha': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
'beta': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_BETA))),
'max_drawdown': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
| apache-2.0 |
previtus/MGR-Project-Code | ResultsGraphing/622_model_competition.py | 1 | 3408 | import os
from Omnipresent import len_
from Downloader.VisualizeHistory import loadHistory
from ResultsGraphing.custom import finally_show, plot_2x2_detailed, count_averages, save_plot, boxplots_in_row, boxplots_in_row_custom611, plot_two_together, plot_together
dir_folder = os.path.dirname(os.path.abspath(__file__))
###
"""
The idea:
Compare three models on each interesting dataset.
We have models:
img
osm
mix
with datasets: 5556x_markable_640x640, 5556x_minlen30_640px, 5556x_minlen30_640px_2x_expanded .
Three figures each:
all three plotted together
box plot of train plus val of the last
... and of the best epoch
"""
dataset1 = "5556x_markable_640x640"
dataset2 = "5556x_minlen30_640px"
dataset3 = "5556x_minlen30_640px_2x_expanded"
dataset_txt = "markable" # markable or minlen30 or expanded30
SAVE = True
path_folder = dir_folder + '/data/k-fold-tests/6.2.2. model competition - img vs osm vs mix/'
out_folder_1 = dir_folder + '/graphs/6.2.2._model_competition-img-vs-osm-vs-mix/fig1_evolution_' + dataset_txt
out_folder_2 = dir_folder + '/graphs/6.2.2._model_competition-img-vs-osm-vs-mix/fig2_last_epoch_' + dataset_txt
out_folder_3 = dir_folder + '/graphs/6.2.2._model_competition-img-vs-osm-vs-mix/fig3_best_epoch_' + dataset_txt
if dataset_txt == "markable":
osm = path_folder + "5556x_markable_640x640_osm_1761330.npy"
img = path_folder + "5556x_markable_640x640_img_1769355.npy"
mix = path_folder + "5556x_markable_640x640_mix_1761329.npy"
elif dataset_txt == "minlen30":
osm = path_folder + "5556x_minlen30_640px_osm_1761336.npy"
img = path_folder + "5556x_minlen30_640px_img_1769353.npy"
mix = path_folder + "5556x_minlen30_640px_mix_1761323.npy"
else:
osm = path_folder + "5556x_minlen30_640px_2x_expanded_osm_1761327.npy"
img = path_folder + "5556x_minlen30_640px_2x_expanded_img_1769354.npy"
mix = path_folder + "5556x_minlen30_640px_2x_expanded_mix_1761326.npy"
data_paths = [osm, img, mix]
data_names = [
"OSM",
"Image",
"Mixed"]
hard_colors = ['red', 'green', 'blue', 'orange']
light_colors = ['pink', 'lightgreen', 'lightblue', 'yellow']
special_histories = []
for i in range(0,len(data_paths)):
special_histories.append(loadHistory(data_paths[i]))
special_histories[i] = count_averages(special_histories[i], 'loss')
# FIGURE 1
import matplotlib.pyplot as plt
names_to_print = ["OSM average val", "OSM val"]
names_to_print += ["Image average val", "Image val"]
names_to_print += ["Mixed average val", "Mixed val"]
custom_title = 'Models comparison'
colors = ["green", "green", "red", "red", "blue", "blue"]
plt = plot_together(special_histories, names_to_print, colors, custom_title)
save_plot(plt, SAVE, out_folder_1)
# FIGURE 2 state in last
custom_title = 'Validation error in last epoch'
plt, figure = boxplots_in_row_custom611(plt, special_histories, data_names, just='both', forced_ymax = 0.17)
figure.suptitle(custom_title) # needs adjustment of the top value
save_plot(plt, SAVE, out_folder_2)
# FIGURE 3 state in their best epoch
custom_title = 'Validation error in best epoch'
plt, figure = boxplots_in_row_custom611(plt, special_histories, data_names, just='both', BestInstead=True, forced_ymax = 0.17)
figure.suptitle(custom_title) # needs adjustment of the top value
save_plot(plt, SAVE, out_folder_3)
finally_show(plt) | mit |
cogmission/nupic.research | htmresearch/frameworks/union_temporal_pooling/activation/excite_functions/excite_functions_all.py | 3 | 3761 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import matplotlib.pyplot as plt
from excite_function_base import ExciteFunctionBase
class LogisticExciteFunction(ExciteFunctionBase):
"""
Implementation of a logistic activation function for activation updating.
Specifically, the function has the following form:
f(x) = (maxValue - minValue) / (1 + exp(-steepness * (x - xMidpoint) ) ) + minValue
Note: The excitation rate is linear. The activation function is
logistic.
"""
def __init__(self, xMidpoint=5, minValue=10, maxValue=20, steepness=1):
"""
    @param xMidpoint: Controls where the function output reaches the middle of
                      its range, i.e. f(xMidpoint) = (minValue + maxValue) / 2
@param minValue: Minimum value of the function
@param maxValue: Controls the maximum value of the function's range
@param steepness: Controls the steepness of the "middle" part of the
curve where output values begin changing rapidly.
Must be a non-zero value.
"""
assert steepness != 0
self._xMidpoint = xMidpoint
self._maxValue = maxValue
self._minValue = minValue
self._steepness = steepness
def excite(self, currentActivation, inputs):
"""
Increases current activation by amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation += self._minValue + (self._maxValue - self._minValue) / (
1 + numpy.exp(-self._steepness * (inputs - self._xMidpoint)))
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
plt.title('Sigmoid Activation Function')
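# --- Illustrative note (added; not from the original NuPIC source) ---
# With the default parameters (xMidpoint=5, minValue=10, maxValue=20,
# steepness=1), an input exactly at the midpoint increments the activation by
# minValue + (maxValue - minValue)/2:
#
#   f = LogisticExciteFunction()
#   f.excite(numpy.zeros(1), numpy.array([5.0]))   # -> array([ 15.])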
class FixedExciteFunction(ExciteFunctionBase):
"""
Implementation of a simple fixed excite function
The function reset the activation level to a fixed amount
"""
def __init__(self, targetExcLevel=10.0):
"""
"""
self._targetExcLevel = targetExcLevel
def excite(self, currentActivation, inputs):
"""
Increases current activation by a fixed amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation = self._targetExcLevel
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
| agpl-3.0 |
bbusemeyer/mython | busempyer/process_record.py | 2 | 31828 | import numpy as np
import json
import data_processing as dp
import pandas as pd
from pymatgen.io.cif import CifParser
# TODO generalize!
VARTOL = 1e-2
NFE = 8
NORBFE = 10
NORBCH = 4
SMALLSPIN = 1.0 # Spins less than this are considered zero.
def fluctdat_array(jsondat,key='value'):
''' Turn the dictionary of fluctuation data into a single array.'''
# May not work for bundled QWalk jobs. Might need to average instead of [0].
return np.array([d[key] for d in jsondat['fluctuation data']])\
.reshape(jsondat['nspin'],jsondat['nspin'],
jsondat['nregion'],jsondat['nregion'],
jsondat['maxn'],jsondat['maxn'])
# TODO: Inefficient but easy to use.
def old_fluct_vars(flarray):
nspin=flarray.shape[0]
nregion=flarray.shape[2]
nn=flarray.shape[4]
mom=[ (np.arange(nn)*flarray[s1,s1,r1,r1].diagonal()).sum()
for s1 in range(nspin)
for r1 in range(nregion)
]
mom=np.array(mom).reshape(nspin,nregion)
var=[ ((np.arange(nn)-mom[s1,r1])**2*flarray[s1,s1,r1,r1].diagonal()).sum()
for s1 in range(nspin)
for r1 in range(nregion)
]
return np.array(var).reshape(nspin,nregion)
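# --- Illustrative sanity check (added; not part of the original module) ---
# For one spin channel and one region whose diagonal occupation distribution is
# p(n) = [0.25, 0.5, 0.25], the mean occupation is 1.0 and the variance 0.5.
def _example_old_fluct_vars():
  flarray = np.zeros((1, 1, 1, 1, 3, 3))
  flarray[0, 0, 0, 0] = np.diag([0.25, 0.5, 0.25])
  return old_fluct_vars(flarray)  # expected: array([[ 0.5]])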
def fluct_covars(flarray):
nspin=flarray.shape[0]
nregion=flarray.shape[2]
nn=flarray.shape[4]
mom=[ (np.arange(nn)*flarray[s1,s1,r1,r1].diagonal()).sum()
for s1 in range(nspin)
for r1 in range(nregion)
]
mom=np.array(mom).reshape(nspin,nregion)
covar=[
((np.arange(nn)-mom[s1,r1])*(np.arange(nn)-mom[s2,r2])\
*flarray[s1,s2,r1,r2]).sum()
for s1 in range(nspin)
for s2 in range(nspin)
for r1 in range(nregion)
for r2 in range(nregion)
]
return np.array(covar).reshape(nspin,nspin,nregion,nregion)
def unpack_nfluct(jsondat):
''' Calculate useful quantities and put them into a nice dataframe.
Example:
>>> mydata=json.load(open('qw.json','r'))
>>> unpack_nfluct(mydata['properties']['region_fluctuation'])
Args:
jsondat (dict): result from calling gosling -json on a QWalk file and using ['properties']['region_fluctuation'].
Returns:
dict: Moments and variances as a dict.
'''
results={}
results['fluctdat']=fluctdat_array(jsondat)
results['flucterr']=fluctdat_array(jsondat,key='error')
count=np.arange(results['fluctdat'].shape[-1])
results['moms']=np.einsum('ssrrnn,n->sr',results['fluctdat'],count)
results['momserr']=np.einsum('ssrrnn,n->sr',results['flucterr']**2,count**2)**0.5
# shifted(s,r,n)=n-mu(s,r)
shifted=count[None,None,:]-results['moms'][:,:,None]
shiftederr=results['momserr'][:,:,None]
results['covars']=np.einsum('aibjck,abc,ijk->aibj',results['fluctdat'],shifted,shifted)
results['covarserr']=\
np.einsum('aibjck,abc,ijk->aibj',results['flucterr']**2,shifted**2,shifted**2)**0.5 +\
np.einsum('aibjck,abc,ijk->aibj',results['fluctdat']**2,shiftederr**2,shifted**2)**0.5 +\
np.einsum('aibjck,abc,ijk->aibj',results['fluctdat']**2,shifted**2,shiftederr**2)**0.5
return results
def analyze_nfluct(fluctdat):
moms=fluctdat['moms']
momserr=fluctdat['momserr']
cov=fluctdat['covars']
coverr=fluctdat['covarserr']
fluctdat.update({
'spin': moms[0] - moms[1],
'charge': moms[0] + moms[1],
'avgerr': (momserr[0]**2 + momserr[1]**2)**0.5,
'magcov': cov[0,0] + cov[1,1] - cov[0,1] - cov[1,0],
'chgcov': cov[0,0] + cov[1,1] + cov[0,1] + cov[1,0],
'coverr': (coverr[0,0]**2 + coverr[1,1]**2 + coverr[0,1]**2 + coverr[1,0]**2)**0.5
})
return fluctdat
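# --- Illustrative usage sketch (added; not part of the original module) ---
# How unpack_nfluct and analyze_nfluct are meant to be chained; 'qw.json' is
# only a placeholder for a `gosling -json` dump.
def _example_nfluct_pipeline(jsonpath='qw.json'):
  dat = json.load(open(jsonpath, 'r'))
  fluct = analyze_nfluct(unpack_nfluct(dat['properties']['region_fluctuation']))
  # 'spin' and 'charge' are (nregion,) arrays; 'magcov' and 'chgcov' are
  # (nregion, nregion) covariance matrices.
  return fluct['spin'], fluct['charge'], fluct['magcov']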
################################################################################
# If you're wondering about how to use these, and you're in the Wagner group on
# github, check out my FeTe notebook!
################################################################################
##### !!! These are all written for autogenv1, so they might be obsolete.
###############################################################################
# Process record group of functions.
def process_record(record):
""" Take the json produced from autogen and process into a dictionary of much
processed and more useful results. """
res = {}
copykeys = ['dft','supercell','total_spin','charge','xyz','cif','control']
nonautogen_keys = ['a','c','se_height','ordering','pressure']
for copykey in copykeys+nonautogen_keys:
if copykey in record.keys():
res[copykey] = record[copykey]
if 'dft' in record.keys():
res['dft'] = record['dft']
if 'mag_moments' in record['dft'].keys():
res['dft']['spins_consistent'] = _check_spins(res['dft'],small=SMALLSPIN)
if 'vmc' in record['qmc'].keys():
res['vmc'] = _process_vmc(record['qmc']['vmc'])
if 'dmc' in record['qmc'].keys():
print("Getting DMC")
res['dmc'] = _process_dmc(record['qmc']['dmc'])
if 'results' in record['qmc']['postprocess'].keys():
res['dmc'].update(_process_post(record['qmc']['postprocess']))
return res
def _process_post(post_record):
""" Process postprocess results by k-averaging and site-averaging."""
if 'results' not in post_record.keys(): return {}
res = {}
# Right now just checks the first k-point: problem?
if 'region_fluctuation' in post_record['results'][0]['results']['properties'].keys():
res['fluct'] = _analyze_nfluct(post_record)
if 'tbdm_basis' in post_record['results'][0]['results']['properties'].keys():
res['ordm'] = _analyze_ordm(post_record)
return res
def _process_vmc(dmc_record):
grouplist = ['jastrow','optimizer']
res = {}
if 'results' not in dmc_record.keys():
return res
res['energy'] = json.loads(pd.DataFrame(dmc_record['results'])\
.groupby(grouplist)\
.apply(_kaverage_energy)\
.reset_index()
.to_json()
)
return res
def _process_dmc(dmc_record):
grouplist = ['timestep','jastrow','localization','optimizer']
res = {}
if 'results' not in dmc_record.keys():
return res
res['energy'] = json.loads(pd.DataFrame(dmc_record['results'])\
.groupby(grouplist)\
.apply(_kaverage_energy)\
.reset_index()
.to_json()
)
return res
def mat_diag_exp(pmat,perr):
  ''' Mean and variance of the diagonal of a matrix (the diagonal entries are
  treated as a probability distribution over the index n). '''
# Not double-checked yet!
avg,avgerr=0.0,0.0
var,varerr=0.0,0.0
nmax = len(pmat)
for n in range(nmax):
avg += n*pmat[n][n]
avgerr += (n*perr[n][n])**2
avgerr=avgerr**0.5
for n in range(nmax):
var += (n-avg)**2*pmat[n][n]
varerr += (perr[n][n]*(n-avg)**2)**2 +\
(2*pmat[n][n]*avgerr*(n-avg))**2
varerr=varerr**0.5
return avg,avgerr,var,varerr
def old_analyze_nfluct(post_record):
""" Version of _analyze_nfluct where no site-averaging is done.
  Useful for analysis outside of the autogen pipeline. """
def diag_exp(rec):
""" Compute mean and variance. """
res = dict(zip(
('avg','avgerr','var','varerr'),
mat_diag_exp(rec['value'],rec['error'])
))
for info in ['jastrow', 'optimizer', 'localization',
'timestep', 'spini', 'sitei']:
res[info] = rec[info]
return pd.Series(res)
def covar(rec,adf):
""" Compute covariance. """
res={}
res['cov']=0.0
pmat=rec['value']
nmax=len(pmat)
avgi=adf.loc[(rec['spini'],rec['sitei']),'avg']
avgj=adf.loc[(rec['spinj'],rec['sitej']),'avg']
for m in range(nmax):
for n in range(nmax):
res['cov']+=pmat[m][n]*(m-avgi)*(n-avgj)
for info in ['jastrow','optimizer','localization','timestep',
'spini','spinj','sitei','sitej']:
res[info] = rec[info]
return pd.Series(res)
def subspins(siterec):
tmpdf = siterec.set_index('spin')
magmom = tmpdf.loc['up','avg'] - tmpdf.loc['down','avg']
totchg = tmpdf.loc['up','avg'] + tmpdf.loc['down','avg']
magerr = (tmpdf.loc['up','avgerr']**2 + tmpdf.loc['down','avgerr']**2)**0.5
return pd.Series({
'site':siterec['site'].values[0],
'magmom':magmom, 'magmom_err':magerr,
'totchg':totchg, 'totchg_err':magerr
})
  # Moments and other arithmetic.
#fluctdf = _kaverage_fluct(post_record['results'])
grouplist = ['timestep','jastrow','localization','optimizer']
fluctdf = pd.DataFrame(post_record['results'])\
.groupby(grouplist)\
.apply(_kaverage_fluct)\
.reset_index()
for s in ['spini','spinj']:
ups = (fluctdf[s] == 0)
fluctdf[s] = "down"
fluctdf.loc[ups,s] = "up"
diag=( (fluctdf['spini']==fluctdf['spinj']) &\
(fluctdf['sitei']==fluctdf['sitej']) )
avgdf=fluctdf[diag].apply(diag_exp,axis=1)
avgdf=avgdf.rename(columns={'spini':'spin','sitei':'site'})
magdf=avgdf.groupby(grouplist+['site']).apply(subspins)
avgdf=pd.merge(avgdf,magdf)
covdf=fluctdf.apply(lambda x: covar(x,avgdf.set_index(['spin','site'])),axis=1)
osspsp=((covdf['spini']!=covdf['spinj'])&(covdf['sitei']==covdf['sitej']))
ossdf=covdf[osspsp].rename(columns={'sitei':'site','spini':'spin'})
avgdf=pd.merge(avgdf,ossdf,on=grouplist+['site','spin'])
del avgdf['sitej']
# Catagorization.
avgdf['netmag'] = "down"
avgdf.loc[avgdf['magmom']>0,'netmag'] = "up"
avgdf['spinchan'] = "minority"
avgdf.loc[avgdf['netmag']==avgdf['spin'],'spinchan'] = "majority"
avgdf['element'] = "Se"
return avgdf
def _analyze_nfluct(post_record):
""" Compute physical values and site-average number fluctuation. """
def diag_exp(rec):
""" Compute mean and variance. """
res = {}
for dat in ['avg','var','avgerr','varerr']:
res[dat] = 0.0
for info in ['jastrow', 'optimizer', 'localization',
'timestep', 'spini', 'sitei']:
res[info] = rec[info]
pmat = rec['value']
perr = rec['error']
nmax = len(pmat)
for n in range(nmax):
res['avg'] += n*pmat[n][n]
res['avgerr'] += (n*perr[n][n])**2
res['avgerr']= res['avgerr']**0.5
for n in range(nmax):
res['var'] += (n-res['avg'])**2*pmat[n][n]
res['varerr'] += (perr[n][n]*(n-res['avg'])**2)**2 +\
(2*pmat[n][n]*res['avgerr']*(n-res['avg']))**2
res['varerr'] = res['varerr']**0.5
return pd.Series(res)
def covar(rec,adf):
""" Compute covariance. """
res={}
res['cov']=0.0
pmat=rec['value']
nmax=len(pmat)
avgi=adf.loc[(rec['spini'],rec['sitei']),'avg']
avgj=adf.loc[(rec['spinj'],rec['sitej']),'avg']
for m in range(nmax):
for n in range(nmax):
res['cov']+=pmat[m][n]*(m-avgi)*(n-avgj)
for info in ['jastrow','optimizer','localization','timestep',
'spini','spinj','sitei','sitej']:
res[info] = rec[info]
return pd.Series(res)
def subspins(siterec):
tmpdf = siterec.set_index('spin')
magmom = tmpdf.loc['up','avg'] - tmpdf.loc['down','avg']
totchg = tmpdf.loc['up','avg'] + tmpdf.loc['down','avg']
magerr = (tmpdf.loc['up','avgerr']**2 + tmpdf.loc['down','avgerr']**2)**0.5
return pd.Series({
'site':siterec['site'].values[0],
'magmom':magmom, 'magmom_err':magerr,
'totchg':totchg, 'totchg_err':magerr
})
def siteaverage(sgrp):
tol=10*sgrp['varerr'].mean()
if sgrp['var'].std() > tol:
print("nfluct: Site average warning: variation in sites larger than expected.")
print("%f > %f"%(sgrp['var'].std(),tol))
return pd.Series({
'variance':sgrp['var'].mean(),
'variance_err':(sgrp['varerr']**2).mean()**0.5,
'magmom':abs(sgrp['magmom'].values).mean(),
'totchg':abs(sgrp['totchg'].values).mean(),
'magmom_err':(sgrp['magmom_err']**2).mean()**0.5,
'covariance':sgrp['cov'].mean()
})
  # Moments and other arithmetic.
#fluctdf = _kaverage_fluct(post_record['results'])
grouplist = ['timestep','jastrow','localization','optimizer']
fluctdf = pd.DataFrame(post_record['results'])\
.groupby(grouplist)\
.apply(_kaverage_fluct)\
.reset_index()
for s in ['spini','spinj']:
ups = (fluctdf[s] == 0)
fluctdf[s] = "down"
fluctdf.loc[ups,s] = "up"
diag=( (fluctdf['spini']==fluctdf['spinj']) &\
(fluctdf['sitei']==fluctdf['sitej']) )
avgdf=fluctdf[diag].apply(diag_exp,axis=1)
avgdf=avgdf.rename(columns={'spini':'spin','sitei':'site'})
magdf=avgdf.groupby(grouplist+['site']).apply(subspins)
avgdf=pd.merge(avgdf,magdf)
covdf=fluctdf.apply(lambda x: covar(x,avgdf.set_index(['spin','site'])),axis=1)
osspsp=((covdf['spini']!=covdf['spinj'])&(covdf['sitei']==covdf['sitej']))
ossdf=covdf[osspsp].rename(columns={'sitei':'site','spini':'spin'})
avgdf=pd.merge(avgdf,ossdf,on=grouplist+['site','spin'])
# Catagorization.
avgdf['netmag'] = "down"
avgdf.loc[avgdf['magmom']>0,'netmag'] = "up"
avgdf['spinchan'] = "minority"
avgdf.loc[avgdf['netmag']==avgdf['spin'],'spinchan'] = "majority"
avgdf['element'] = "Se"
avgdf.loc[avgdf['site']<NFE,'element'] = "Fe"
# Site average.
## Debug site averaging (ensure averaging is reasonable).
#for lab,df in avgdf.groupby(grouplist+['spinchan','element']):
# print(lab)
# print(df[['avg','avgerr','var','varerr','cov']])
savgdf = avgdf.groupby(grouplist+['spinchan','element'])\
.apply(siteaverage)\
.reset_index()
magdf = savgdf.drop(['spinchan','variance','variance_err','covariance'],axis=1).drop_duplicates()
covdf = savgdf.drop(['magmom','magmom_err'],axis=1)
return { 'magmom':json.loads(magdf.to_json()),
'covariance':json.loads(covdf.to_json()) }
def analyze_ordm(post_record,orbmap):
""" Compute physical values and site-average 1-body RDM. """
grouplist = ['timestep','jastrow','localization','optimizer']
# k-average (currently selects gamma-only due to bug).
ordmdf = pd.DataFrame(post_record['results'])\
.groupby(['timestep','jastrow','localization','optimizer'])\
.apply(_kaverage_ordm)\
.reset_index()
# Classify orbitals based on index.
infodf = ordmdf['orbni'].drop_duplicates().apply(lambda orbnum:
pd.Series(dict(zip(['orbnum','elem','atom','orb'],orbmap[orbnum]))))
ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbni',right_on='orbnum')
ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbnj',right_on='orbnum',
suffixes=("i","j"))
ordmdf = ordmdf.drop(['orbnumi','orbnumj'],axis=1)
# Classify atoms based on spin occupations.
occdf = ordmdf[ordmdf['orbni']==ordmdf['orbnj']]\
.groupby(grouplist+['atomi'])\
.agg({'up':np.sum,'down':np.sum})\
.reset_index()\
.rename(columns={'atomi':'at'})
occdf['net'] = occdf['up'] - occdf['down']
occdf = occdf.drop(['up','down'],axis=1)
occdf['atspin'] = 'up'
occdf.loc[occdf['net'] < 0,'atspin'] = 'down'
occdf.loc[occdf['net'].abs() < 1e-1,'atspin'] = 'zero'
ordmdf = pd.merge(ordmdf,occdf,
left_on=grouplist+['atomi'],right_on=grouplist+['at'])
ordmdf = pd.merge(ordmdf,occdf,
left_on=grouplist+['atomj'],right_on=grouplist+['at'],
suffixes=('i','j'))\
.drop(['ati','atj'],axis=1)
ordmdf['rel_atspin'] = "antiparallel"
ordmdf.loc[ordmdf['atspini']==ordmdf['atspinj'],'rel_atspin'] = "parallel"
ordmdf.loc[ordmdf['atspini']=='zero','rel_atspin'] = "zero"
ordmdf.loc[ordmdf['atspinj']=='zero','rel_atspin'] = "zero"
# Classify spin channels based on minority and majority channels.
ordmdf = ordmdf.set_index([c for c in ordmdf.columns
if c not in ['up','down','up_err','down_err']])
vals = ordmdf[['up','down']].stack()
vals.index.names = vals.index.names[:-1]+['spin']
errs = ordmdf[['up_err','down_err']]\
.rename(columns={'up_err':'up','down_err':'down'})\
.stack()
errs.index.names = errs.index.names[:-1]+['spin']
ordmdf = pd.DataFrame({'ordm':vals,'ordm_err':errs}).reset_index()
ordmdf['spini'] = "minority"
ordmdf['spinj'] = "minority"
ordmdf.loc[ordmdf['spin'] == ordmdf['atspini'],'spini'] = "majority"
ordmdf.loc[ordmdf['spin'] == ordmdf['atspinj'],'spinj'] = "majority"
ordmdf.loc[ordmdf['atspini'] == 'zero','spini'] = 'neither'
ordmdf.loc[ordmdf['atspinj'] == 'zero','spinj'] = 'neither'
return ordmdf
def _analyze_ordm(post_record):
""" Compute physical values and site-average 1-body RDM. """
def saverage_orb(sgrp):
tol=10*sgrp['ordm_err'].mean()
if sgrp['ordm'].std() > tol:
print("saverage_orb: Site average warning: variation in sites larger than expected.")
print("%.3f > %.3f"%(sgrp['ordm'].std(),tol))
return pd.Series({
'ordm':sgrp['ordm'].mean(),
'ordm_err':(sgrp['ordm_err']**2).mean()**0.5,
})
def saverage_hop(sgrp):
tol=10*sgrp['ordm_err'].mean()
if sgrp['ordm'].std() > tol:
print("saverage_hop: Site average warning: variation in sites larger than expected.")
print("%.3f > %.3f"%(sgrp['ordm'].std(),tol))
return pd.Series({
'ordm':sgrp['ordm'].mean(),
'ordm_err':(sgrp['ordm_err']**2).mean()**0.5,
})
grouplist = ['timestep','jastrow','localization','optimizer']
# k-average (currently selects gamma-only due to bug).
ordmdf = pd.DataFrame(post_record['results'])\
.groupby(['timestep','jastrow','localization','optimizer'])\
.apply(_kaverage_ordm)\
.reset_index()
# Classify orbitals based on index.
infodf = ordmdf['orbni'].drop_duplicates().apply(lambda orbnum:
pd.Series(dict(zip(['orbnum','elem','atom','orb'],orbinfo(orbnum)))))
ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbni',right_on='orbnum')
ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbnj',right_on='orbnum',
suffixes=("i","j"))
ordmdf = ordmdf.drop(['orbnumi','orbnumj'],axis=1)
# Classify atoms based on spin occupations.
occdf = ordmdf[ordmdf['orbni']==ordmdf['orbnj']]\
.groupby(grouplist+['atomi'])\
.agg({'up':np.sum,'down':np.sum})\
.reset_index()\
.rename(columns={'atomi':'at'})
occdf['net'] = occdf['up'] - occdf['down']
occdf = occdf.drop(['up','down'],axis=1)
occdf['atspin'] = 'up'
occdf.loc[occdf['net'] < 0,'atspin'] = 'down'
occdf.loc[occdf['net'].abs() < 1e-1,'atspin'] = 'zero'
ordmdf = pd.merge(ordmdf,occdf,
left_on=grouplist+['atomi'],right_on=grouplist+['at'])
ordmdf = pd.merge(ordmdf,occdf,
left_on=grouplist+['atomj'],right_on=grouplist+['at'],
suffixes=('i','j'))\
.drop(['ati','atj'],axis=1)
ordmdf['rel_atspin'] = "antiparallel"
ordmdf.loc[ordmdf['atspini']==ordmdf['atspinj'],'rel_atspin'] = "parallel"
ordmdf.loc[ordmdf['atspini']=='zero','rel_atspin'] = "zero"
ordmdf.loc[ordmdf['atspinj']=='zero','rel_atspin'] = "zero"
# Classify spin channels based on minority and majority channels.
ordmdf = ordmdf.set_index([c for c in ordmdf.columns
if c not in ['up','down','up_err','down_err']])
vals = ordmdf[['up','down']].stack()
vals.index.names = vals.index.names[:-1]+['spin']
errs = ordmdf[['up_err','down_err']]\
.rename(columns={'up_err':'up','down_err':'down'})\
.stack()
errs.index.names = errs.index.names[:-1]+['spin']
ordmdf = pd.DataFrame({'ordm':vals,'ordm_err':errs}).reset_index()
ordmdf['spini'] = "minority"
ordmdf['spinj'] = "minority"
ordmdf.loc[ordmdf['spin'] == ordmdf['atspini'],'spini'] = "majority"
ordmdf.loc[ordmdf['spin'] == ordmdf['atspinj'],'spinj'] = "majority"
ordmdf.loc[ordmdf['atspini'] == 'zero','spini'] = 'neither'
ordmdf.loc[ordmdf['atspinj'] == 'zero','spinj'] = 'neither'
# Focus in on orbital occupations.
orboccdf = ordmdf[ordmdf['orbni']==ordmdf['orbnj']]\
.drop([col for col in ordmdf.columns if col[-1]=='j'],1)\
.groupby(grouplist+['elemi','orbi','spini'])\
.apply(saverage_orb)\
.reset_index()
# Focus in on parallel or antiparallel hopping.
orbsumsel = grouplist+['atomi','atomj','elemi','elemj','rel_atspin','spini','spinj']
siteavgsel = [c for c in orbsumsel if c not in ['atomi','atomj']]
hopdf = ordmdf[ordmdf['atomi'] != ordmdf['atomj']]\
.groupby(orbsumsel)\
.agg({'ordm':lambda x:x.abs().sum(), 'ordm_err':lambda x:sum(x**2)**0.5})\
.reset_index()\
.groupby(siteavgsel)\
.agg({'ordm':np.mean, 'ordm_err':lambda x:np.mean(x**2)**0.5})\
.reset_index()
return {'orb':json.loads(orboccdf.to_json()),
'hop':json.loads(hopdf.to_json())}
def _kaverage_energy(kavgdf):
# Keep unpacking until reaching energy.
egydf = \
unpack(
unpack(
unpack(
kavgdf
['results'])\
['properties'])\
['total_energy']).applymap(dp.unlist)
# TODO generalize!
weights = np.tile(1./egydf['value'].shape[0],egydf['value'].shape)
return pd.Series({
"value":(weights*egydf['value'].values).sum(),
"error":((weights*egydf['error'].values)**2).sum()**.5
})
def _kaverage_fluct(reclist):
# Warning! _kaverage_qmc() assuming equal k-point weight!
datdf = \
unpack(
unpack(
unpack(
unpack(
pd.DataFrame(reclist)\
['results'])\
['properties'])\
['region_fluctuation'])\
['fluctuation data'])
spiniser = datdf.applymap(lambda x: x['spin'][0]).drop_duplicates()
spinjser = datdf.applymap(lambda x: x['spin'][1]).drop_duplicates()
siteiser = datdf.applymap(lambda x: x['region'][0]).drop_duplicates()
sitejser = datdf.applymap(lambda x: x['region'][1]).drop_duplicates()
valser = datdf.applymap(lambda x: x['value']).apply(dp.mean_array)
errser = datdf.applymap(lambda x: x['error']).apply(dp.mean_array_err)
# Safely turn DataFrame into Series.
if spiniser.shape[0] == 1: spiniser = spiniser.iloc[0]
if spinjser.shape[0] == 1: spinjser = spinjser.iloc[0]
if siteiser.shape[0] == 1: siteiser = siteiser.iloc[0]
if sitejser.shape[0] == 1: sitejser = sitejser.iloc[0]
ret = pd.DataFrame({
'spini':spiniser,
'spinj':spinjser,
'sitei':siteiser,
'sitej':sitejser,
'value':valser,
'error':errser
}).set_index(['spini','spinj','sitei','sitej'])
return ret
def _kaverage_ordm(kavgdf):
# Warning! _kaverage_qmc() assuming equal k-point weight!
datdf =\
unpack(
unpack(
unpack(
unpack(
kavgdf\
['results'])\
['properties'])\
['tbdm_basis'])\
['obdm'])
res = pd.DataFrame(datdf['up'].iloc[0]).stack().to_frame('up')
res = res.join(pd.DataFrame(datdf['down'].iloc[0]).stack().to_frame('down'))
res = res.join(pd.DataFrame(datdf['up_err'].iloc[0]).stack().to_frame('up_err'))
res = res.join(pd.DataFrame(datdf['down_err'].iloc[0]).stack().to_frame('down_err'))
res = res.reset_index()\
.rename(columns={'level_0':'orbni','level_1':'orbnj'})\
.set_index(['orbni','orbnj'])
return res
def _check_spins(dft_record,small=1.0):
""" Check that the spins that were set at the beginning correspond to the
spins it ends up having. Anything less than small is considered zero."""
init_spins = dft_record['initial_spin']
moms = dft_record['mag_moments']
moms = np.array(moms)
print(init_spins)
print(moms)
zs = abs(moms) < small
up = moms > 0.
dn = moms < 0.
  moms = moms.astype(int)  # convert values rather than reinterpreting the buffer
moms[up] = 1
moms[dn] = -1
moms[zs] = 0
if len(init_spins) < len(moms):
init_spins = np.append(init_spins,np.zeros(len(moms)-len(init_spins)))
if len(init_spins)==0:
if (moms == np.zeros(moms.shape)).all():
return True
else:
return False
else:
# Note casting prevents numpy.bool.
return bool((moms == np.array(init_spins)).all())
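# --- Illustrative note (added; not part of the original module) ---
# Example: two sites initialized with spins [1, -1]; the third site is padded
# with zero and converged to a small moment, so the check still passes.
#   _check_spins({'initial_spin': [1, -1], 'mag_moments': [2.3, -2.1, 0.2]},
#                small=1.0)   # -> True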
def orbinfo(orbnum):
""" Compute orbital info based on orbital number: [element,atomnum,orbital].
Currently only useful for Fe-chalcogenides. Warning: this depends on how you
define the basis!"""
NFe = 8
NSe = 8
# CRYSTAL: 'order of internal storage'.
# s, px, py, pz, dz2-r2, dxz, dyz, dx2-y2, dxy, ...
Feorbs = ['3s','3px','3py','3pz','4s','3dz2-r2','3dxz','3dyz','3dx2-y2','3dxy']
Seorbs = ['3s','3px','3py','3pz']
NbFe = len(Feorbs)
NbSe = len(Seorbs)
res = [orbnum]
if float(orbnum)/(NFe * NbFe) > (1 - 1e-8):
res += ['Se',(orbnum - NFe*NbFe) // NbSe + 1 + NFe]
res.append(Seorbs[orbnum%NbSe])
else:
res += ['Fe',orbnum // NbFe + 1]
res.append(Feorbs[orbnum%NbFe])
return res
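# --- Illustrative note (added; not part of the original module) ---
# With NFe = 8 Fe atoms of 10 basis functions each, orbital numbers 0-79 map
# onto Fe and 80 onwards onto Se, e.g.
#   orbinfo(0)   # -> [0, 'Fe', 1, '3s']
#   orbinfo(80)  # -> [80, 'Se', 9, '3s']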
###############################################################################
# Format autogen group of function.
def format_datajson(inp_json="results.json",filterfunc=lambda x:True):
""" Takes processed autogen json file and organizes it into a Pandas DataFrame."""
rawdf = pd.read_json(open(inp_json,'r'))
rawdf['ncell'] = rawdf['supercell'].apply(lambda x:
abs(np.linalg.det(np.array(x).reshape(3,3)))
)
# Unpacking the energies.
alldf = _format_dftdf(rawdf)
for qmc in ['vmc','dmc']:
qmcdf = unpack(rawdf[qmc])
if 'energy' in qmcdf.columns:
qmcdf = qmcdf.join(
unpack(qmcdf['energy'].dropna()).applymap(dp.undict)
)
qmcdf = qmcdf\
.rename(columns={'value':"%s_energy"%qmc,'error':"%s_energy_err"%qmc})\
.drop('energy',axis=1)
# FIXME some bug in reading the jastrow and optimizer, not sure where it's coming from.
qmcdf.loc[qmcdf['jastrow'].isnull(),'jastrow']='twobody'
qmcdf.loc[qmcdf['optimizer'].isnull(),'optimizer']='energy'
alldf = alldf.join(qmcdf,lsuffix='',rsuffix='_new')
for col in alldf.columns:
if '_new' in col:
sel=alldf[col].notnull()
diff=alldf.loc[sel,col.replace('_new','')]!=alldf.loc[sel,col]
assert not any(diff),'''
Joined QMC data changed something. {}'''.format(alldf.loc[sel,[col,col+'_new']][diff])
#assert all(alldf.loc[sel,col.replace('_new','')]==alldf.loc[sel,col])
del alldf[col]
if "%s_energy"%qmc in qmcdf.columns:
alldf["%s_energy"%qmc] = alldf["%s_energy"%qmc]
alldf["%s_energy_err"%qmc] = alldf["%s_energy_err"%qmc]
listcols = [
'broyden',
'initial_charges',
'energy_trace',
'initial_spin',
'kmesh',
'levshift',
# 'localization',
# 'timestep',
# 'jastrow',
# 'optimizer'
]
alldf=alldf[alldf['id'].apply(filterfunc)]
if 'mag_moments' in alldf.columns: listcols.append('mag_moments')
# Convert lists.
for col in listcols:
if col in alldf.columns:
alldf.loc[alldf[col].notnull(),col] = \
alldf.loc[alldf[col].notnull(),col].apply(lambda x:tuple(x))
for col in alldf.columns:
alldf[col] = pd.to_numeric(alldf[col],errors='ignore')
if 'cif' in alldf.keys():
alldf = alldf.join(
alldf.loc[alldf['cif']!="None",'cif'].apply(extract_struct),
rsuffix="_extract"
)
return alldf
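# --- Illustrative usage sketch (added; not part of the original module) ---
# Typical call on an autogen results file; the file name and the id filter are
# placeholders.
#   alldf = format_datajson(inp_json='results.json',
#                           filterfunc=lambda i: 'fese' in str(i))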
def cast_supercell(sup):
for rix,row in enumerate(sup):
sup[rix] = tuple(row)
return tuple(sup)
def make_basis_consistent(row):
if type(row['basis'])==dict:
return row['basis']
atoms=list(row['initial_charges'].keys())
return dict(zip(atoms,[row['basis'] for a in atoms]))
def _format_dftdf(rawdf):
def desect_basis(basis_info):
if type(basis_info)==list:
return pd.Series(dict(zip(
['basis_lowest','basis_number','basis_factor'],basis_info)))
# This part of the method is for the old basis part.
elif type(basis_info)==dict:
min_basis = 1e10
for atom in basis_info.keys():
new = min([np.array(elem['coefs'])[0,:].min() for elem in basis_info[atom]])
if new < min_basis: min_basis = new
return pd.Series(dict(zip(
['basis_lowest','basis_number','basis_factor'],[min_basis,0,0])))
# For now taking the best part of each atom so it works.
# This is the case of split basis, which I determined is not that useful.
# Not sure if this is the best behavior.
#elif type(basis_info)==dict:
# min_basis = min((basis[0] for atom,basis in basis_info.items()))
# max_factor = max((basis[1] for atom,basis in basis_info.items()))
# max_number = max((basis[2] for atom,basis in basis_info.items()))
# return pd.Series(dict(zip(
# ['basis_lowest','basis_number','basis_factor'],[min_basis,max_factor,max_number])))
else:
return pd.Series(dict(zip(
['basis_lowest','basis_number','basis_factor'],[0,0,0])))
def hashable_basis(basis_info):
if type(basis_info)==dict:
atoms=sorted(basis_info.keys())
return tuple(zip(atoms,(tuple(basis_info[a]) for a in atoms)))
else:
return tuple(basis_info)
ids = rawdf['control'].apply(lambda x:x['id'])
dftdf = unpack(rawdf['dft'])
dftdf = dftdf.join(ids).rename(columns={'control':'id'})
copylist = ['supercell','ncell','cif','xyz','a','c','se_height','pressure','ordering','total_spin']
for rawinfo in copylist:
if rawinfo in rawdf.columns:
dftdf = dftdf.join(rawdf[rawinfo])
funcdf = pd.DataFrame(dftdf['functional'].to_dict()).T
dftdf = dftdf.join(funcdf)
dftdf['tolinteg'] = dftdf['tolinteg'].apply(lambda x:x[0])
dftdf['spins_consistent'] = dftdf['spins_consistent'].astype(bool)
dftdf = dftdf.join(dftdf['basis'].apply(desect_basis))
#dftdf['basis']=dftdf.apply(make_basis_consistent,axis=1)
#dftdf['basis'] = dftdf['basis'].apply(hashable_basis)
dftdf['basis_number'] = dftdf['basis_number'].astype(int)
dftdf.loc[dftdf['supercell'].notnull(),'supercell'] = \
dftdf.loc[dftdf['supercell'].notnull(),'supercell']\
.apply(lambda x:cast_supercell(x))
dftdf.loc[dftdf['levshift'].isnull(),'levshift']=\
dftdf.loc[dftdf['levshift'].isnull(),'levshift']\
.apply(lambda x:(0.0,0))
dftdf['levshift_shift']=dftdf['levshift'].apply(lambda x: x[0])
if 'mag_moments' in dftdf.columns:
dftdf['max_mag_moment'] = np.nan
dftdf.loc[dftdf['mag_moments'].notnull(),'max_mag_moment'] =\
dftdf.loc[dftdf['mag_moments'].notnull(),'mag_moments'].apply(lambda x:
max(abs(np.array(x)))
)
dftdf['dft_energy'] = dftdf['total_energy']
dftdf=dftdf.drop(['functional'],axis=1)
return dftdf
###############################################################################
# Misc. tools.
def unpack(ser):
""" Attempt to turn a series of dictionaries into a new DataFrame.
Works with most autogen levels of data storage. """
return pd.DataFrame(ser.to_dict()).T
# Tuple to DF entry.
def parse_err(df,key='energy'):
tmpdf = df[key].apply(lambda x: pd.Series({key:x[0],'energy_err':x[1]}))
del df[key]
return df.join(tmpdf)
# Get out atomic positions (and possibly more later).
# Pretty slow: can be made faster by saving cifs that are already done.
def extract_struct(cifstr):
parser = CifParser.from_string(cifstr)\
.get_structures()[0]\
.as_dict()
lat_a = parser['lattice']['a']
lat_b = parser['lattice']['b']
lat_c = parser['lattice']['c']
poss = [
tuple(site['abc']) for site in
parser['sites']
]
ions = [
site['species'][0]['element'] for site in
parser['sites']
]
positions = {}
for iidx,ion in enumerate(ions):
if ion in positions.keys():
positions[ion].append(poss[iidx])
else:
positions[ion] = [poss[iidx]]
for key in positions.keys():
positions[key] = np.array(positions[key])
return pd.Series(
[lat_a,lat_b,lat_c,positions],
['a','b','c','positions']
)
def match(df,cond,keys):
match_this = df.loc[cond,keys]
if len(match_this)>1:
print("Multiple rows match:")
print(match_this)
raise AssertionError("Row match not unique")
match_this = match_this.iloc[0].values
return df.set_index(keys).xs(match_this,level=keys).reset_index()
def find_duplicates(df,def_cols):
#TODO hard to compare arrays with NANs correctly.
duped=df[def_cols]
clean=df.drop_duplicates()
  # Return the rows that drop_duplicates discarded, i.e. the duplicates.
  return duped.drop(clean.index)
##############################################################################
# Testing.
if __name__=='__main__':
datajson=process_record(json.load(open('exampledata/fese_mags_0.record.json','r')))
print(datajson['dmc']['fluct'].keys())
| gpl-2.0 |
frrp/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
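# --- Illustrative sketch (added; not part of the original script) ---
# Minimal example of the Future helper; the dates and prices are made up.
#   idx = date_range('2010-01-04', periods=5, freq='B')
#   f = Future(Series([22.5, 22.3, 22.1, 21.9, 21.8], index=idx), code='2010_01')
#   f.monthNr()      # -> 1
#   f.dr(idx[1])     # -> 3 business days remaining after that date
#   f.price(idx[0])  # -> 22.5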
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def recounstructVXX():
"""
    Calculate VXX returns.
    Needs a previously preprocessed file vix_futures.csv.
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
i =np.nonzero(endDates>=date)[0][0] # find first not exprired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of remaining dates in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
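# --- Illustrative note (added; not part of the original script) ---
# Roll-weight arithmetic used above: with a 21-day roll period (dt = 21) and
# 14 business days left before settlement (dr = 14), the index holds
#   w1 = 100*dr/dt       = 66.7 % first-month futures
#   w2 = 100*(dt-dr)/dt  = 33.3 % second-month futures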
##-------------------Main script---------------------------
if __name__=="__main__":
Y = recounstructVXX()
print Y.head(30)#
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
paladin74/neural-network-animation | matplotlib/backends/backend_gtkagg.py | 11 | 4354 | """
Render to gtk from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print('backend_gtkagg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTKAgg(figure)
    if DEBUG: print('backend_gtkagg.new_figure_manager done')
    return FigureManagerGTKAgg(canvas, num)
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print('FigureCanvasGTKAgg.configure_event')
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._need_redraw = True
self.resize_event()
if DEBUG: print('FigureCanvasGTKAgg.configure_event end')
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print('FigureCanvasGTKAgg.render_figure')
FigureCanvasAgg.draw(self)
if DEBUG: print('FigureCanvasGTKAgg.render_figure pixmap', pixmap)
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba()
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print('FigureCanvasGTKAgg.render_figure done')
def blit(self, bbox=None):
if DEBUG: print('FigureCanvasGTKAgg.blit', self._pixmap)
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print('FigureCanvasGTKAgg.done')
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
FigureCanvas = FigureCanvasGTKAgg
FigureManager = FigureManagerGTKAgg
| mit |
toobaz/pandas | pandas/util/_test_decorators.py | 2 | 6300 | """
This module provides decorator functions which can be applied to test objects
in order to skip those objects when certain conditions occur. A sample use case
is to detect if the platform is missing ``matplotlib``. If so, any test objects
which require ``matplotlib`` and decorated with ``@td.skip_if_no_mpl`` will be
skipped by ``pytest`` during the execution of the test suite.
To illustrate, after importing this module:
import pandas.util._test_decorators as td
The decorators can be applied to classes:
@td.skip_if_some_reason
class Foo:
...
Or individual functions:
@td.skip_if_some_reason
def test_foo():
...
For more information, refer to the ``pytest`` documentation on ``skipif``.
"""
from distutils.version import LooseVersion
import locale
from typing import Optional
from _pytest.mark.structures import MarkDecorator
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
from pandas.compat.numpy import _np_version
from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR
def safe_import(mod_name, min_version=None):
"""
Parameters:
-----------
mod_name : str
Name of the module to be imported
min_version : str, default None
Minimum required version of the specified mod_name
Returns:
--------
object
The imported module if successful, or False
"""
try:
mod = __import__(mod_name)
except ImportError:
return False
if not min_version:
return mod
else:
import sys
try:
version = getattr(sys.modules[mod_name], "__version__")
except AttributeError:
# xlrd uses a capitalized attribute name
version = getattr(sys.modules[mod_name], "__VERSION__")
if version:
from distutils.version import LooseVersion
if LooseVersion(version) >= LooseVersion(min_version):
return mod
return False
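# --- Illustrative note (added; not part of pandas itself) ---
# safe_import returns the module object on success and False otherwise, e.g.
#   mod = safe_import("scipy", min_version="0.19.0")
#   if mod:
#       ...  # scipy is importable and new enough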
def _skip_if_no_mpl():
mod = safe_import("matplotlib")
if mod:
mod.use("Agg", warn=True)
else:
return True
def _skip_if_has_locale():
lang, _ = locale.getlocale()
if lang is not None:
return True
def _skip_if_not_us_locale():
lang, _ = locale.getlocale()
if lang != "en_US":
return True
def _skip_if_no_scipy():
return not (
safe_import("scipy.stats")
and safe_import("scipy.sparse")
and safe_import("scipy.interpolate")
and safe_import("scipy.signal")
)
def skip_if_installed(package: str,) -> MarkDecorator:
"""
Skip a test if a package is installed.
Parameters
----------
package : str
The name of the package.
"""
return pytest.mark.skipif(
safe_import(package), reason="Skipping because {} is installed.".format(package)
)
def skip_if_no(package: str, min_version: Optional[str] = None) -> MarkDecorator:
"""
Generic function to help skip tests when required packages are not
present on the testing system.
This function returns a pytest mark with a skip condition that will be
evaluated during test collection. An attempt will be made to import the
specified ``package`` and optionally ensure it meets the ``min_version``
The mark can be used as either a decorator for a test function or to be
applied to parameters in pytest.mark.parametrize calls or parametrized
fixtures.
If the import and version check are unsuccessful, then the test function
(or test case when used in conjunction with parametrization) will be
skipped.
Parameters
----------
package: str
The name of the required package.
min_version: str or None, default None
Optional minimum version of the package.
Returns
-------
_pytest.mark.structures.MarkDecorator
a pytest.mark.skipif to use as either a test decorator or a
parametrization mark.
"""
msg = "Could not import '{}'".format(package)
if min_version:
msg += " satisfying a min_version of {}".format(min_version)
return pytest.mark.skipif(
not safe_import(package, min_version=min_version), reason=msg
)
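# --- Illustrative usage sketch (added; not part of pandas itself) ---
# The returned mark can decorate a test directly or be attached to a single
# parametrize case; the package names below are placeholders.
#
#   @td.skip_if_no("xarray", min_version="0.10.0")
#   def test_to_xarray():
#       ...
#
#   @pytest.mark.parametrize("engine", [
#       "python",
#       pytest.param("numexpr", marks=td.skip_if_no("numexpr")),
#   ])
#   def test_engine(engine):
#       ...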
skip_if_no_mpl = pytest.mark.skipif(
_skip_if_no_mpl(), reason="Missing matplotlib dependency"
)
skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), reason="matplotlib is present")
skip_if_32bit = pytest.mark.skipif(is_platform_32bit(), reason="skipping for 32 bit")
skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows")
skip_if_windows_python_3 = pytest.mark.skipif(
is_platform_windows(), reason="not used on win32"
)
skip_if_has_locale = pytest.mark.skipif(
_skip_if_has_locale(),
reason="Specific locale is set {lang}".format(lang=locale.getlocale()[0]),
)
skip_if_not_us_locale = pytest.mark.skipif(
_skip_if_not_us_locale(),
reason="Specific locale is set " "{lang}".format(lang=locale.getlocale()[0]),
)
skip_if_no_scipy = pytest.mark.skipif(
_skip_if_no_scipy(), reason="Missing SciPy requirement"
)
skip_if_no_ne = pytest.mark.skipif(
not _USE_NUMEXPR,
reason="numexpr enabled->{enabled}, "
"installed->{installed}".format(enabled=_USE_NUMEXPR, installed=_NUMEXPR_INSTALLED),
)
def skip_if_np_lt(ver_str, reason=None, *args, **kwds):
if reason is None:
reason = "NumPy %s or greater required" % ver_str
return pytest.mark.skipif(
_np_version < LooseVersion(ver_str), reason=reason, *args, **kwds
)
def parametrize_fixture_doc(*args):
"""
Intended for use as a decorator for parametrized fixture,
this function will wrap the decorated function with a pytest
``parametrize_fixture_doc`` mark. That mark will format
initial fixture docstring by replacing placeholders {0}, {1} etc
with parameters passed as arguments.
Parameters:
----------
args: iterable
Positional arguments for docstring.
Returns:
-------
documented_fixture: function
The decorated function wrapped within a pytest
``parametrize_fixture_doc`` mark
"""
def documented_fixture(fixture):
fixture.__doc__ = fixture.__doc__.format(*args)
return fixture
return documented_fixture
| bsd-3-clause |
scikit-learn-contrib/forest-confidence-interval | forestci/version.py | 2 | 2065 | # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 5
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "forestci: confidence intervals for scikit-learn "
description += "forest algorithms"
# Long description will go up on the pypi page
long_description = """
sklearn forest ci
=================
`forest-confidence-interval` is a Python module for calculating variance and
adding confidence intervals to scikit-learn random forest regression or
classification objects. The core functions calculate an in-bag and error bars
for random forest objects
Please read the repository README_ on Github or our documentation_
.. _README: https://github.com/scikit-learn-contrib/forest-confidence-interval/blob/master/README.md
.. _documentation: http://contrib.scikit-learn.org/forest-confidence-interval/
"""
NAME = "forestci"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "[email protected]"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/scikit-learn-contrib/forest-confidence-interval"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem, Bryna Hazelton, Kivan Polimis"
AUTHOR_EMAIL = "[email protected]"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
| mit |
mykoz/ThinkStats2 | code/hinc.py | 67 | 1494 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
"""Converts dollar amounts to integers."""
try:
return int(s.lstrip('$').replace(',', ''))
except ValueError:
if s == 'Under':
return 0
elif s == 'over':
return np.inf
return None
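# --- Illustrative note (added; not part of the original ThinkStats code) ---
# Examples of Clean on the labels that appear in hinc06.csv:
#   Clean('$20,000')  # -> 20000
#   Clean('Under')    # -> 0
#   Clean('over')     # -> inf
#   Clean('or')       # -> None  (any other non-numeric token)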
def ReadData(filename='hinc06.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
data = pandas.read_csv(filename, header=None, skiprows=9)
cols = data[[0, 1]]
res = []
for _, row in cols.iterrows():
label, freq = row.values
freq = int(freq.replace(',', ''))
t = label.split()
low, high = Clean(t[0]), Clean(t[-1])
res.append((high, freq))
df = pandas.DataFrame(res)
# correct the first range
df[0][0] -= 1
# compute the cumulative sum of the freqs
df[2] = df[1].cumsum()
# normalize the cumulative freqs
total = df[2][41]
df[3] = df[2] / total
# add column names
df.columns = ['income', 'freq', 'cumsum', 'ps']
return df
def main():
df = ReadData()
print(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
siutanwong/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
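# The black 'Empirical' curve above splits the sorted test labels into 25
# equally sized bins and plots the observed fraction of positives per bin,
# giving an empirical estimate of P(y=1) to compare with the predicted curves.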
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
laas/EnergyComputation | src/make_plot.py | 1 | 26024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import matplotlib.pyplot as plt
from decimal import Decimal
from math import *
class XP :
def __init__(self):
self.WalkedDistance_list = []
self.Fall_list = []
self.MaxtrackingError_list = []
self.DurationOfTheExperiment_list = []
self.EnergyOfMotors_list = []
self.EnergyOfWalking_list = []
self.CostOfTransport_list = []
self.MechaCostOfTransport_list = []
self.Froude_list = []
self.algo = ""
self.setup = ""
self.algo_dico = {"10cm":1,"15cm":2,"hwalk":3,"NPG":4,"Beam":5,"kawada":6,"Multiple algorithms":7}#,"Morisawa":8}#"Stepping stones":7,
#"Down step":8,"Muscode":9}
self.setup_dico = {'degrees':1,'Bearing':2,'Pushes':3,'Slopes':4,'Translations\nFB':5,'Translations\nSIDE':6,
'Gravels':7,'Slip floor \nblack carpet':8,'Slip floor \ngreen carpet':9,
'Slip floor \nnormal ground':10,"bricks":11,'Slopes_':12,"stairs_":13,"obstacle 20cm":14}
self.kpi_list = ["Walked distance","Success rate","Max tracking error",
"Duration of the experiment","Mechanical energy","Total energy",
"Total cost of transport","Mechanical cost of transport","Froude number"]
self.dimension_list = ["m","Dimensionless","rad","s","J.m-1.s-1","J.m-1.s-1","Dimensionless",
"Dimensionless","Dimensionless"]
self.success_rate = 0.0
#self.direction = ""
self.headers = []
def __str__(self):
attrs = vars(self)
return ', '.join("%s: %s" % item for item in attrs.items())
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def convert_fall(my_string):
if my_string=="true":
return True
elif my_string=="false":
return False
def read_file(file_name):
print "reading file"
results_file = open(file_name,"r")
list_lines_str = []
list_lines_str_split = []
list_lines_split = []
header_line = []
header_file = results_file.readline()
for line in results_file :
list_lines_str.append(line)
#print "list_lines_str",list_lines_str
list_lines_str_split.append(list_lines_str[-1].split())
#print "list_lines_str_split",list_lines_str_split
header_line.append(list_lines_str_split[-1].pop(0))
#print "header_line",header_line, " list_lines_str_split ", list_lines_str_split
list_lines_split.append([float(word) for word in list_lines_str_split[-1] if isfloat(word)])
#for i in len(list_lines_str_split):
list_lines_split[-1].insert(1,convert_fall(list_lines_str_split[-1][1]))
#print "list_lines_str_split", list_lines_str_split
#print "list_lines_split",list_lines_split
return header_file,header_line,list_lines_split
def discrimin_xp(header_file,header_line,list_lines_split):
print "discrimin xp"
xp_list = []
previous_algo = ""
previous_setup = 0
# previous_direction = ""
# current_direction = ""
for i in range(len(list_lines_split)) :
if header_line[i].find("10cm") != -1:
current_algo = "10cm"
elif header_line[i].find("15cm") != -1:
current_algo = "15cm"
elif header_line[i].find("hwalk") != -1:
current_algo = "hwalk"
elif header_line[i].find("PG") != -1:
current_algo = "NPG"
elif header_line[i].find("Beam") != -1:
current_algo = "Beam"
elif header_line[i].find("kawada") != -1:
current_algo = "kawada"
elif header_line[i].find("gravles") != -1:
current_algo = "hwalk"
elif header_line[i].find("slipFloor") != -1:
current_algo = "hwalk"
elif header_line[i].find("climbSlope") != -1:
current_algo = "Multiple algorithms"
elif header_line[i].find("ClimbingWithTools") != -1:
current_algo = "Multiple algorithms"
elif header_line[i].find("StepStairsDownSeq") != -1:
current_algo = "Multiple algorithms"#"Down step"
elif header_line[i].find("stepOver") != -1:
current_algo = "Multiple algorithms"#"Muscode"
elif header_line[i].find("SteppingStones") != -1:
current_algo = "Multiple algorithms"#"Stepping stones"
else :
print "no algo pattern found in this line, \n",header_line[i]
sys.exit(1)
print header_line[i]
if header_line[i].find("degrees") != -1:
deg_index = header_line[i].find("degrees")
current_setup = header_line[i][(deg_index-2):deg_index]+"°C"
elif header_line[i].find("Bearing") != -1:
current_setup = "Brg"#"Bearing"
elif header_line[i].find("Pushes") != -1:
current_setup = "Psh"#"Pushes"
elif header_line[i].find("Slopes") != -1:
current_setup = "Slne"#"Slopes"
elif header_line[i].find("translation") != -1 and header_line[i].find("FB") != -1:
current_setup = "TrslFB"#"Translations_FB"
elif header_line[i].find("translation") != -1 and header_line[i].find("SIDE") != -1:
current_setup = "TrslSD"#"Translations_SIDE"
elif header_line[i].find("gravles") != -1:
current_setup = "Grvl"#"Gravels"
elif header_line[i].find("slipFloor_backCarpet") != -1:
current_setup = "FrcB"#"Slip floor \nblack carpet"
elif header_line[i].find("slipFloor_greenCarpe") != -1:
current_setup = "FrcG"#"Slip floor \ngreen carpet"
elif header_line[i].find("slipFloor_normal_floor") != -1:
current_setup = "FrcN"#"Slip floor \nnormal ground"
elif header_line[i].find("climbSlope") != -1:
current_setup = "Skor"#"Slopes_"
elif header_line[i].find("ClimbingWithTools") != -1:
current_setup = "tool upstairs"#"stairs_"
elif header_line[i].find("StepStairsDownSeq") != -1:
current_setup = "down stairs"#"stairs_"
elif header_line[i].find("stepOver") != -1:
current_setup = "muscode"#"obstacle 20cm"
elif header_line[i].find("SteppingStones") != -1:
current_setup = "stepping stones" # "bricks"
else :
current_setup = "nothing"
if previous_algo != current_algo or previous_setup!=current_setup : #or previous_direction!=current_direction:
print "new xp detected, algo : ", current_algo, " setup : ",current_setup#, " ", current_direction
xp = XP()
xp.algo = current_algo
xp_list.append(xp)
xp_list[-1].WalkedDistance_list.append(list_lines_split[i][0])
xp_list[-1].Fall_list.append(list_lines_split[i][1])
xp_list[-1].MaxtrackingError_list.append(list_lines_split[i][2])
xp_list[-1].DurationOfTheExperiment_list.append(list_lines_split[i][3])
xp_list[-1].EnergyOfMotors_list.append(list_lines_split[i][4])
xp_list[-1].EnergyOfWalking_list.append(list_lines_split[i][5])
xp_list[-1].CostOfTransport_list.append(list_lines_split[i][6])
xp_list[-1].MechaCostOfTransport_list.append(list_lines_split[i][7])
xp_list[-1].Froude_list.append(list_lines_split[i][8])
xp_list[-1].headers.append(header_line[i])
if xp_list[-1].setup=="muscode" or (xp_list[-1].algo=="NPG" and xp_list[-1].setup=="10°C"):
xp_list[-1].Fall_list[-1]=False
xp_list[-1].setup = current_setup
#xp_list[-1].direction=current_direction
print xp_list[-1].algo," ", xp_list[-1].setup
previous_algo = current_algo
previous_setup = current_setup
#previous_direction=current_direction
print "xp_list[",i,"] : ", len(xp_list)
#for xp in xp_list:
# print "xp ::: ", xp.algo, " ", xp.setup
#for xp in xp_list:
# print "xp ::: ", xp.algo," ", xp.setup
return xp_list
def mean_xp(xp_list) :
#for xp in xp_list:
# print "xp ::: ", xp.algo," ", xp.setup
list_mean_xp = []
xp_index_to_rm =[]
for xp in xp_list :
print "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
print xp.algo," ", xp.setup
if xp.setup=="muscode":
print xp.Fall_list
print " LEN : ",len(xp.WalkedDistance_list),len(xp.Fall_list),len(xp.MaxtrackingError_list),len(xp.DurationOfTheExperiment_list),len(xp.EnergyOfWalking_list),len(xp.EnergyOfMotors_list),len(xp.CostOfTransport_list),len(xp.MechaCostOfTransport_list),len(xp.Froude_list)
print "before rm_absurd_values"
skip_this_xp = rm_absurd_values(xp)
print " LEN : ",len(xp.WalkedDistance_list),len(xp.Fall_list),len(xp.MaxtrackingError_list),len(xp.DurationOfTheExperiment_list),len(xp.EnergyOfWalking_list),len(xp.EnergyOfMotors_list),len(xp.CostOfTransport_list),len(xp.MechaCostOfTransport_list),len(xp.Froude_list)
print "after rm_absurd_values"
#print "nb_of_xp : ", nb_of_xp
if not skip_this_xp :
nb_of_xp = len(xp.WalkedDistance_list)
#print "nb_of_xp : ", nb_of_xp
list_mean_xp.append((np.mean(xp.WalkedDistance_list),
xp.success_rate,
np.mean(xp.MaxtrackingError_list),
np.mean(xp.DurationOfTheExperiment_list),
np.mean(xp.EnergyOfMotors_list),
np.mean(xp.EnergyOfWalking_list),
np.mean(xp.CostOfTransport_list),
np.mean(xp.MechaCostOfTransport_list),
np.mean(xp.Froude_list),
xp.algo,
xp.setup,
nb_of_xp))
print "success rate for ",xp.algo," ", xp.setup," : ",xp.success_rate
print "xp.WalkedDistance_list :", xp.WalkedDistance_list
else :
print "!!!!! no usable value in this xp : ", xp.algo, " ", xp.setup
xp_index_to_rm.append(xp_list.index(xp))
#print "nb_of_xp : ", nb_of_xp
#remove xp with no valid trials:
for index in reversed(xp_index_to_rm) :
print "removed xp : ",xp_list[index].algo," ",xp_list[index].setup
xp_list.pop(index)
for idx,xp in enumerate(xp_list):
print xp.algo," ",xp.setup
#if xp.algo=="15cm" and xp.setup=="10°C":
# print "success rate for 15cm 10deg : ", list_mean_xp[idx]
#print "list_mean_xp : ",list_mean_xp
return list_mean_xp
def rm_absurd_values(xp):
#remove pushes
'''if xp.setup == "Psh":
print "+ experiment ", xp.algo, " ", xp.setup, " removed"
return True'''
absurd_index_list = []
# remove trials with null walked distance
if xp.algo=="kawada":
pass
else :
for distance in (xp.WalkedDistance_list):
if distance == 0 :
absurd_index_list.append(xp.WalkedDistance_list.index(distance))
print "+ experiment will be removed in ", xp.algo, " ", xp.setup
print "+ walked distance is 0 , index : ",absurd_index_list[-1]
for absurd_index in reversed(absurd_index_list):
xp.WalkedDistance_list.pop(absurd_index)
xp.Fall_list.pop(absurd_index)
xp.MaxtrackingError_list.pop(absurd_index)
xp.DurationOfTheExperiment_list.pop(absurd_index)
xp.EnergyOfMotors_list.pop(absurd_index)
xp.EnergyOfWalking_list.pop(absurd_index)
xp.CostOfTransport_list.pop(absurd_index)
xp.MechaCostOfTransport_list.pop(absurd_index)
xp.Froude_list.pop(absurd_index)
if len(xp.WalkedDistance_list) == 0:
return True # skip_this_xp
# remove trials duration over 200s
absurd_index_list = []
if xp.algo=="kawada" or xp.setup=="Slne":
pass
else :
for duration in (xp.DurationOfTheExperiment_list):
if duration > 200:
absurd_index_list.append(xp.DurationOfTheExperiment_list.index(duration))
print "absurd index : ", absurd_index_list[-1]
print "# experiment has been removed in ", xp.algo, " ", xp.setup
print "# duration over 200 (Translations and slopes excluded) : ", duration
for absurd_index in reversed(absurd_index_list):
xp.WalkedDistance_list.pop(absurd_index)
xp.Fall_list.pop(absurd_index)
xp.MaxtrackingError_list.pop(absurd_index)
xp.DurationOfTheExperiment_list.pop(absurd_index)
xp.EnergyOfMotors_list.pop(absurd_index)
xp.EnergyOfWalking_list.pop(absurd_index)
xp.CostOfTransport_list.pop(absurd_index)
xp.MechaCostOfTransport_list.pop(absurd_index)
xp.Froude_list.pop(absurd_index)
if len(xp.WalkedDistance_list)==0:
return True #skip_this_xp
#remove trials with duration far away of the others
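    # The block below drops trials whose duration deviates from the mean by
    # more than three standard deviations (a 3-sigma outlier filter); kawada
    # runs are exempt from this filter.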
if xp.algo=="kawada":
pass
else:
absurd_index_list = []
duration_variance = np.var(xp.DurationOfTheExperiment_list)
duration_mean = np.mean(xp.DurationOfTheExperiment_list)
#print "xp.DurationOfTheExperiment_list : ", xp.DurationOfTheExperiment_list
for duration in (xp.DurationOfTheExperiment_list) :
if abs(duration-duration_mean) > 3*sqrt(duration_variance):
absurd_index_list.append(xp.DurationOfTheExperiment_list.index(duration))
print "absurd index : ", absurd_index_list[-1]
print "* experiment has been removed in ",xp.algo," ",xp.setup
print "* duration over 3 sigma : ",abs(duration-duration_mean)," > 3 * ",sqrt(duration_variance)
for absurd_index in reversed(absurd_index_list):
xp.WalkedDistance_list.pop(absurd_index)
xp.Fall_list.pop(absurd_index)
xp.MaxtrackingError_list.pop(absurd_index)
xp.DurationOfTheExperiment_list.pop(absurd_index)
xp.EnergyOfMotors_list.pop(absurd_index)
xp.EnergyOfWalking_list.pop(absurd_index)
xp.CostOfTransport_list.pop(absurd_index)
xp.MechaCostOfTransport_list.pop(absurd_index)
xp.Froude_list.pop(absurd_index)
if len(xp.WalkedDistance_list)==0:
return True #skip_this_xp
# calculate success rate and remove xp without any success
temp_Fall_list = {i: xp.Fall_list.count(i) for i in xp.Fall_list}
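    # temp_Fall_list maps each Fall value (True/False) to its number of
    # occurrences, like collections.Counter; the success rate is the fraction
    # of trials in which the robot did not fall, with a fallback computation
    # when no False entry is present in Fall_list.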
print temp_Fall_list
try:
xp.success_rate = temp_Fall_list[False] / float(len(xp.Fall_list)) # hasn't fallen
except:
try:
xp.success_rate = (len(xp.WalkedDistance_list) - temp_Fall_list[True]) / float(len(xp.Fall_list))
except:
print "0 success in this xp : ", xp.algo, " ", xp.setup
return True
# remove trials where the robot has fallen
if xp.algo=="kawada" or xp.setup=="Psh"or xp.setup=="muscode" or(xp.algo=="NPG" and xp.setup=="10°C"):
pass
else :
absurd_index_list = []
for idx,fall in enumerate(xp.Fall_list):
if fall == True:
absurd_index_list.append(idx)
print "absurd index : ", absurd_index_list[-1]
print "* experiment has been removed in ", xp.algo, " ", xp.setup
print "* robot fell in this xp (walked distance) : ", xp.WalkedDistance_list[idx]
print "nb of trials to remove, len xp : ", len(absurd_index_list), " ", len(xp.Fall_list)
for absurd_index in reversed(absurd_index_list):
xp.WalkedDistance_list.pop(absurd_index)
xp.Fall_list.pop(absurd_index)
xp.MaxtrackingError_list.pop(absurd_index)
xp.DurationOfTheExperiment_list.pop(absurd_index)
xp.EnergyOfMotors_list.pop(absurd_index)
xp.EnergyOfWalking_list.pop(absurd_index)
xp.CostOfTransport_list.pop(absurd_index)
xp.MechaCostOfTransport_list.pop(absurd_index)
xp.Froude_list.pop(absurd_index)
# print "xp.DurationOfTheExperiment_list : ", xp.DurationOfTheExperiment_list
if len(xp.WalkedDistance_list) == 0:
return True # skip_this_xp
else :
return False #can continue this xp
def plot_graph(list_mean_xp,xp_list) :
xp_tmp=XP()
for key in xp_tmp.algo_dico.keys() : #loop on algo
#if key=="hwalk":
fig, ax = plt.subplots(3, 3)
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.91,
wspace=0.26, hspace=0.26)
plt.suptitle("Algorithm : "+key)
if key=="kawada":
fig_list=plt.get_fignums()
plt.close(fig_list[-1])
#close_figures() ################################# to be removed
print "enter in plotting kawada"
#setup_list = [xp[-2] for xp in list_mean_xp if xp_list[list_mean_xp.index(xp)].algo == key]
setup_list = [xp.setup for xp in xp_list if xp.algo == key]
# for setup in setup_list:
# if setup_list.count(setup)>1:
# setup_list.remove(setup)
tmp_kpi_list=[ "Max tracking error","Duration of the experiment"] #"Intensity",
fig, ax = plt.subplots(1, len(tmp_kpi_list))
plt.suptitle("Algorithm : " + key)
for k,kpi in enumerate(tmp_kpi_list):
print "KPI : ", kpi
#for setup_k in ["Translations_FB","Translations_SIDE"]:
print "setup kawada (direction) : "#, setup_k
#y_list=[xp[k+1] for xp in list_mean_xp if xp_list[list_mean_xp.index(xp)].algo==key] # get mean values for algo
#setup_list=[xp[-2] for xp in list_mean_xp if xp_list[list_mean_xp.index(xp)].algo==key ]#\
#and xp_list[list_mean_xp.index(xp)].setup == setup_k)] #get setup found for algo
#direction_list = [xp.direction for xp in xp_list if xp.algo == key]
y_list=[]
for idx,xp in enumerate(xp_list):
if xp.algo==key:
if kpi=="Intensity":
print "intensity : TODO"
else:
y_list.append(list_mean_xp[idx][xp.kpi_list.index(kpi)])
# direction_list=[]
# for xp in xp_list:
# if xp.algo==key:
# if xp.direction=="":
# direction_list.append(xp.setup)
# else:
# direction_list.append(xp.setup+"\n"+xp.direction)
print "setup_list", setup_list
print "y_list", y_list
nb_of_xp_list = [xp[-1] for xp in list_mean_xp if
xp_list[list_mean_xp.index(xp)].algo == key] # get number of trials for xp
y_tuple = tuple(y_list)
setup_tuple = tuple(setup_list)
N = len(y_tuple)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
# fig, ax = plt.subplots()
# ax[0, 0].plot(x, y)
rects1 = ax[k].bar(ind, y_tuple, width, color='r')
# add some text for labels, title and axes ticks
ax[k].set_ylabel(xp_tmp.dimension_list[k])
ax[k].set_title(kpi)
ax[k].set_xticks(ind)
print "setup_tuple : ", setup_tuple
ax[k].set_xticklabels(setup_tuple)
ax[k].set_ylim((ax[k].get_ylim()[0], ax[k].get_ylim()[1] * 1.1))
nb_points = len(y_list)
for rect in rects1:
height = rect.get_height()
if height > 0.1:
ax[k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str('{0:.2f}'.format(height)), str(nb_of_xp_list[rects1.index(rect)])),
ha='center', va='bottom')
else:
ax[k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str('{0:.2E}'.format(height)), str(nb_of_xp_list[rects1.index(rect)])),
ha='center', va='bottom')
plt.show(block=False)
print "end loop for kawada "
else :
jk=0 #place of subplot
for j in range(3): #1st loop place subplot
for k in range(3): #2nd loop subplot
print key," ",xp_tmp.kpi_list[jk]
y_list=[xp[jk] for xp in list_mean_xp if xp_list[list_mean_xp.index(xp)].algo==key] # get mean values for algo
setup_list=[xp[-2] for xp in list_mean_xp if xp_list[list_mean_xp.index(xp)].algo==key]#get setup found for algo
nb_of_xp_list=[xp[-1] for xp in list_mean_xp if xp_list[list_mean_xp.index(xp)].algo == key] # get number of trials for xp
#print "y_list",y_list
#plt.plot(y_list)
#plt.show()
y_tuple = tuple(y_list)
setup_tuple = tuple(setup_list)
N = len(y_tuple)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
#fig, ax = plt.subplots()
#ax[0, 0].plot(x, y)
rects1 = ax[j, k].bar(ind, y_tuple, width, color='r')
# add some text for labels, title and axes ticks
ax[j, k].set_ylabel(xp_tmp.dimension_list[jk])
ax[j, k].set_title(xp_tmp.kpi_list[jk])
ax[j, k].set_xticks(ind)
#print "setup_tuple : ",setup_tuple
ax[j, k].set_xticklabels(setup_tuple)
ax[j, k].set_yscale('log')
if (key=="NPG" and xp_tmp.kpi_list[jk]=="Max tracking error") or key=="hwalk":
ax[j, k].set_ylim((ax[j, k].get_ylim()[0] * 0.95, ax[j, k].get_ylim()[1] * 1.25))
elif key=="Multiple algorithms":
ax[j, k].set_ylim((ax[j, k].get_ylim()[0] * 0.95, ax[j, k].get_ylim()[1] * 1.35))
elif key=="Beam":
ax[j, k].set_ylim((ax[j, k].get_ylim()[0] * 0.95, ax[j, k].get_ylim()[1] * 1.1005))
else:
ax[j, k].set_ylim((ax[j, k].get_ylim()[0]*0.95, ax[j, k].get_ylim()[1] * 1.105))
#print "lim inf : ",ax[j, k].get_ylim()[0]," lim up : ",ax[j, k].get_ylim()[1]
nb_points = len(y_list)
autolabel(rects1,ax,j,k,nb_of_xp_list,xp_tmp.kpi_list[jk],key)
plt.show(block=False)
jk+=1
print "end loop one plot "
return
def autolabel(rects,ax,j,k,nb_of_xp_list,kpi,key):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
if height> 0.1:
if key =="hwalk" and kpi =="Mechanical energy":
ax[j, k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str(int(height)), str(nb_of_xp_list[rects.index(rect)])),
ha='center', va='bottom')
elif key =="hwalk" and kpi =="Total energy":
ax[j, k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str(int(height)), str(nb_of_xp_list[rects.index(rect)])),
ha='center', va='bottom')
else:
ax[j, k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str('{0:.2f}'.format(height)), str(nb_of_xp_list[rects.index(rect)])),
ha='center', va='bottom')
else:
if key =="hwalk" and kpi=="Froude number":
ax[j, k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str('{0:.2f}'.format(height*100)), str(nb_of_xp_list[rects.index(rect)])),
ha='center', va='bottom')
elif key =="hwalk" and kpi == "Max tracking error" :
ax[j, k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str('{0:.2f}'.format(height * 1000)), str(nb_of_xp_list[rects.index(rect)])),
ha='center', va='bottom')
else:
ax[j, k].text(rect.get_x() + rect.get_width() / 2., height,
'%s \nnb:%s' % (str('{0:.2E}'.format(height)),str(nb_of_xp_list[rects.index(rect)])),
ha='center', va='bottom')
def close_figures():
for i in plt.get_fignums():
plt.close(i)
print "all figures closed"
if __name__ == '__main__':
close_figures()
if len(sys.argv)>1:
file_name = sys.argv[1]#"/home/anthropobot/devel/EnergyComputation/_build/bin/DEBUG/results_2017_Oct_19.txt"
else :
exit(1)
header_file, header_line, list_lines_split = read_file(file_name)
xp_list = discrimin_xp(header_file, header_line, list_lines_split)
list_mean_xp = mean_xp(xp_list)
plot_graph(list_mean_xp, xp_list)
| bsd-3-clause |
chrisburr/scikit-learn | examples/svm/plot_rbf_parameters.py | 44 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
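# Illustrative aside (not part of the original example): with the values used
# further below (vmin=0.2, midpoint=0.92) the midpoint of the colormap is moved
# to 0.92, i.e. inputs at vmin, midpoint and vmax map to 0.0, 0.5 and 1.0.
# `_norm_demo` is a name introduced for this sketch only.
_norm_demo = MidpointNormalize(vmin=0.2, vmax=1.0, midpoint=0.92)
# _norm_demo(np.array([0.2, 0.92, 1.0])) -> [0.0, 0.5, 1.0]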
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
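# Illustrative aside (not part of the original example): as the docstring
# notes, if the best parameters lie on the boundary of the grid, the grid
# should be extended in that direction. A minimal boundary check could be:
best_C, best_gamma = grid.best_params_['C'], grid.best_params_['gamma']
if best_C in (C_range[0], C_range[-1]) or best_gamma in (gamma_range[0],
                                                         gamma_range[-1]):
    print("Best parameters lie on the grid boundary; consider extending it.")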
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
camallen/aggregation | experimental/penguins/clusterAnalysis/check2.py | 2 | 4078 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
import math
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from divisiveDBSCAN import DivisiveDBSCAN
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
print base_directory
client = pymongo.MongoClient()
db = client['penguin_2014-10-12']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
steps = [5,10,15,20]
penguins_at = {k:[] for k in steps}
alreadyThere = False
subject_index = 0
import cPickle as pickle
#to_sample = pickle.load(open(base_directory+"/Databases/sample.pickle","rb"))
import random
#for subject in collection2.find({"classification_count": 20}):
alreadyThere = True
user_markings = [] #{k:[] for k in steps}
user_ips = [] #{k:[] for k in steps}
zooniverse_id = "APZ0000mcw"
user_index = 0
for classification in collection.find({"subjects" : {"$elemMatch": {"zooniverse_id":zooniverse_id}}}):
user_index += 1
if user_index == 21:
break
per_user = []
ip = classification["user_ip"]
try:
markings_list = classification["annotations"][1]["value"]
if isinstance(markings_list,dict):
for marking in markings_list.values():
if marking["value"] in ["adult","chick"]:
x,y = (float(marking["x"]),float(marking["y"]))
user_markings.append((x,y))
user_ips.append(ip)
except (KeyError, ValueError):
#classification["annotations"]
user_index += -1
user_identified_penguins,clusters,temp = DivisiveDBSCAN(3).fit(user_markings,user_ips,debug =True)#,base_directory + "/Databases/penguins/images/"+object_id+".JPG")
#which users are in each cluster?
users_in_clusters = []
for c in clusters:
users_in_clusters.append([])
for p in c:
i = user_markings.index(p)
users_in_clusters[-1].append(user_ips[i])
X = []
Y = []
data = []
min_one = 6000
closest = None
TT = 1
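# For every pair of user-identified penguin positions, compute the Euclidean
# distance between them and the number of users shared by the two clusters;
# keep track of the closest pair whose user overlap equals TT.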
for i1 in range(len(user_identified_penguins)):
for i2 in range(i1+1,len(user_identified_penguins)):
#if i1 == i2:
# continue
m1 = user_identified_penguins[i1]
m2 = user_identified_penguins[i2]
dist = math.sqrt((m1[0]-m2[0])**2+(m1[1]-m2[1])**2)
X.append(dist)
users1 = users_in_clusters[i1]
users2 = users_in_clusters[i2]
overlap = len([u for u in users1 if u in users2])
Y.append(overlap)
data.append((dist,overlap))
if (overlap == TT) and (dist < min_one):
min_one = dist
closest = (m1,m2)
#plt.plot(X,Y,'.')
#plt.show()
data.sort(key = lambda x:x[0])
#data.sort(key = lambda x:x[1])
data2 = [overlap for dist,overlap in data]
#print data2.index(0)/float(len(data2))
print data2.index(TT)/float(len(data2))
subject = collection2.find_one({"zooniverse_id": zooniverse_id})
url = subject["location"]["standard"]
fName = url.split("/")[-1]
print "http://demo.zooniverse.org/penguins/subjects/standard/"+fName
if not(os.path.isfile(base_directory + "/Databases/penguins/images/"+fName)):
#urllib.urlretrieve ("http://demo.zooniverse.org/penguins/subjects/standard/"+fName, "/home/greg/Databases/penguins/images/"+fName)
urllib.urlretrieve ("http://www.penguinwatch.org/subjects/standard/"+fName, base_directory+"/Databases/penguins/images/"+fName)
print "/home/greg/Databases/penguins/images/"+fName
image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+fName)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
plt.plot((closest[0][0],closest[1][0]),(closest[0][1],closest[1][1]),color="green")
#plt.plot(X,Y,'.')
plt.show()
#plt.plot() | apache-2.0 |
WarrenWeckesser/scikits-image | doc/examples/plot_brief.py | 32 | 1879 | """
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm.
The descriptor consists of relatively few bits and can be computed using
a set of intensity difference tests. The short binary descriptor results
in low memory footprint and very efficient matching based on the Hamming
distance metric.
BRIEF does not provide rotation-invariance. Scale-invariance can be achieved by
detecting and extracting features at different scales.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors
extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors
extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
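# match_descriptors compares the binary BRIEF descriptors using the Hamming
# distance; cross_check=True keeps only matches that are mutual nearest
# neighbours between the two descriptor sets.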
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
| bsd-3-clause |
ankurankan/pgmpy | pgmpy/tests/test_estimators/test_StructureScore.py | 2 | 5283 | import unittest
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import BDeuScore, BDsScore, BicScore, K2Score
class TestBDeuScore(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(
data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0], "D": ["X", "Y", "Z"]}
)
self.m1 = BayesianNetwork([("A", "C"), ("B", "C"), ("D", "B")])
self.m2 = BayesianNetwork([("C", "A"), ("C", "B"), ("A", "D")])
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_score(self):
self.assertAlmostEqual(BDeuScore(self.d1).score(self.m1), -9.907103407446435)
self.assertEqual(BDeuScore(self.d1).score(BayesianNetwork()), 0)
def test_score_titanic(self):
scorer = BDeuScore(self.titanic_data2, equivalent_sample_size=25)
titanic = BayesianNetwork([("Sex", "Survived"), ("Pclass", "Survived")])
self.assertAlmostEqual(scorer.score(titanic), -1892.7383393910427)
titanic2 = BayesianNetwork([("Pclass", "Sex")])
titanic2.add_nodes_from(["Sex", "Survived", "Pclass"])
self.assertLess(scorer.score(titanic2), scorer.score(titanic))
def tearDown(self):
del self.d1
del self.m1
del self.m2
del self.titanic_data
del self.titanic_data2
class TestBDsScore(unittest.TestCase):
def setUp(self):
"""Example taken from https://arxiv.org/pdf/1708.00689.pdf"""
self.d1 = pd.DataFrame(
data={
"X": [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Y": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Z": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
"W": [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1],
}
)
self.m1 = BayesianNetwork([("W", "X"), ("Z", "X")])
self.m1.add_node("Y")
self.m2 = BayesianNetwork([("W", "X"), ("Z", "X"), ("Y", "X")])
def test_score(self):
self.assertAlmostEqual(
BDsScore(self.d1, equivalent_sample_size=1).score(self.m1),
-36.82311976667139,
)
self.assertEqual(
BDsScore(self.d1, equivalent_sample_size=1).score(self.m2),
-45.788991276221964,
)
def tearDown(self):
del self.d1
del self.m1
del self.m2
class TestBicScore(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(
data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0], "D": ["X", "Y", "Z"]}
)
self.m1 = BayesianNetwork([("A", "C"), ("B", "C"), ("D", "B")])
self.m2 = BayesianNetwork([("C", "A"), ("C", "B"), ("A", "D")])
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_score(self):
self.assertAlmostEqual(BicScore(self.d1).score(self.m1), -10.698440814229318)
self.assertEqual(BicScore(self.d1).score(BayesianNetwork()), 0)
def test_score_titanic(self):
scorer = BicScore(self.titanic_data2)
titanic = BayesianNetwork([("Sex", "Survived"), ("Pclass", "Survived")])
self.assertAlmostEqual(scorer.score(titanic), -1896.7250012840179)
titanic2 = BayesianNetwork([("Pclass", "Sex")])
titanic2.add_nodes_from(["Sex", "Survived", "Pclass"])
self.assertLess(scorer.score(titanic2), scorer.score(titanic))
def tearDown(self):
del self.d1
del self.m1
del self.m2
del self.titanic_data
del self.titanic_data2
class TestK2Score(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(
data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0], "D": ["X", "Y", "Z"]}
)
self.m1 = BayesianNetwork([("A", "C"), ("B", "C"), ("D", "B")])
self.m2 = BayesianNetwork([("C", "A"), ("C", "B"), ("A", "D")])
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_score(self):
self.assertAlmostEqual(K2Score(self.d1).score(self.m1), -10.73813429536977)
self.assertEqual(K2Score(self.d1).score(BayesianNetwork()), 0)
def test_score_titanic(self):
scorer = K2Score(self.titanic_data2)
titanic = BayesianNetwork([("Sex", "Survived"), ("Pclass", "Survived")])
self.assertAlmostEqual(scorer.score(titanic), -1891.0630673606006)
titanic2 = BayesianNetwork([("Pclass", "Sex")])
titanic2.add_nodes_from(["Sex", "Survived", "Pclass"])
self.assertLess(scorer.score(titanic2), scorer.score(titanic))
def tearDown(self):
del self.d1
del self.m1
del self.m2
del self.titanic_data
del self.titanic_data2
| mit |
zooniverse/aggregation | experimental/plankton/repeats.py | 2 | 1077 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
client = pymongo.MongoClient()
db = client['plankton_2015-01-01']
classification_collection = db["plankton_classifications"]
subject_collection = db["plankton_subjects"]
user_collection = db["plankton_users"]
finished_subjects = []
for subject in subject_collection.find({"state":"complete"}):
zooniverse_id = subject["zooniverse_id"]
finished_subjects.append((zooniverse_id,subject["updated_at"]))
finished_subjects.sort(key=lambda x:x[1],reverse=True)
print len(finished_subjects)
count = 0
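# Count how many of the most recently completed subjects were classified more
# than once by the same user name (or anonymous IP), i.e. contain repeats.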
for zooniverse_id,date in finished_subjects[:1000]:
users_l = []
for classification in classification_collection.find({"subjects.zooniverse_id":zooniverse_id}):
if "user_name" in classification:
users_l.append(classification["user_name"])
else:
users_l.append(classification["user_ip"])
if not(len(users_l) == len(list(set(users_l)))):
count += 1
print count | apache-2.0 |
DDT-INMEGEN/indicadores | indicadores/scripts/plotea_sni.py | 1 | 1036 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
niveles_sni = [ 'candidato', 'nivel I', 'nivel II', 'nivel III' ]
years = range(2007,2014)
m = [ [5, 7, 7, 8, 3, 3, 6],
[6, 7, 10, 11, 18, 19, 20],
[0, 1, 2, 2, 2, 2, 2],
[0, 0, 0, 2, 2, 3, 4] ]
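# Each row of m corresponds to an SNI level (same order as niveles_sni) and
# each column to a year in `years`; judging from the axis labels below, the
# values appear to be researcher counts per level and year.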
# fig = plt.figure(figsize=(40,40))
plt.imshow(m, interpolation='none')
plt.xticks(range(len(years)), years)
plt.yticks(range(4),niveles_sni)
plt.colorbar()
plt.set_cmap('jet')
# plt.savefig('niveles_sni.svg')
plt.savefig('niveles_sni.png')
plazas = {'ICM A': [3, 4, 2, 6, 9, 8, 9],
'ICM B': [10, 12, 8, 10, 7, 9, 8],
'ICM C': [9, 10, 10, 10, 13, 11, 16,],
'ICM D': [1, 2, 8, 9, 10, 13, 13],
'ICM E': [1, 2, 2, 0, 1, 1, 1],
'ICM F': [0, 0, 0, 0, 3, 0, 2],}
rows = [plazas[p] for p in plazas]
plt.imshow(rows, interpolation='none')
plt.xticks(range(len(years)), years)
plt.yticks(range(6),plazas.keys())
plt.set_cmap('jet')
# plt.savefig('niveles_sni.svg')
plt.savefig('plazas.png')
| agpl-3.0 |
Razvy000/ANN_Course | save_load_nn.py | 1 | 1169 | import os
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer, MinMaxScaler
from ann import ANN
from ann_util import serialize, deserialize
# load the iris dataset
iris = datasets.load_iris()
X, y = iris.data, iris.target
# transform the dataset
xsc = MinMaxScaler(feature_range=(0, 1), copy=True)
xsc.fit(X)
ylb = LabelBinarizer()
ylb.fit(y)
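# LabelBinarizer one-hot encodes the 3 iris classes so the training targets
# match the 3 output units of the ANN([4, 10, 3]) network built below.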
# split the dataset
X_train, X_test, y_train, y_test = train_test_split(xsc.transform(X), y, random_state=0)
# load nn if exists else train
nn_fname = 'models/nn_iris_3000epochs.pickle'
if os.path.exists(nn_fname):
# load
print('loading the nn')
nn = deserialize(nn_fname)
else:
# train
print('training the nn')
nn = ANN([4, 10, 3])
nn.train(X_train, ylb.transform(y_train), 3000)
serialize(nn, nn_fname)
# predict
preds = np.array([nn.predict(example) for example in X_test])
y_pred = ylb.inverse_transform(preds)
# evaluate
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
| apache-2.0 |
grlee77/scipy | scipy/optimize/zeros.py | 12 | 50109 | import warnings
from collections import namedtuple
import operator
from . import _zeros
import numpy as np
_iter = 100
_xtol = 2e-12
_rtol = 4 * np.finfo(float).eps
__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748',
'RootResults']
# Must agree with CONVERGED, SIGNERR, CONVERR, ... in zeros.h
_ECONVERGED = 0
_ESIGNERR = -1
_ECONVERR = -2
_EVALUEERR = -3
_EINPROGRESS = 1
CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
VALUEERR = 'value error'
INPROGRESS = 'No error'
flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR,
_EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS}
class RootResults:
"""Represents the root finding result.
Attributes
----------
root : float
Estimated root location.
iterations : int
Number of iterations needed to find the root.
function_calls : int
Number of times the function was called.
converged : bool
True if the routine converged.
flag : str
Description of the cause of termination.
"""
def __init__(self, root, iterations, function_calls, flag):
self.root = root
self.iterations = iterations
self.function_calls = function_calls
self.converged = flag == _ECONVERGED
self.flag = None
try:
self.flag = flag_map[flag]
except KeyError:
self.flag = 'unknown error %d' % (flag,)
def __repr__(self):
attrs = ['converged', 'flag', 'function_calls',
'iterations', 'root']
m = max(map(len, attrs)) + 1
return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
for a in attrs])
def results_c(full_output, r):
if full_output:
x, funcalls, iterations, flag = r
results = RootResults(root=x,
iterations=iterations,
function_calls=funcalls,
flag=flag)
return x, results
else:
return r
def _results_select(full_output, r):
"""Select from a tuple of (root, funccalls, iterations, flag)"""
x, funcalls, iterations, flag = r
if full_output:
results = RootResults(root=x,
iterations=iterations,
function_calls=funcalls,
flag=flag)
return x, results
return x
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
fprime2=None, x1=None, rtol=0.0,
full_output=False, disp=True):
"""
Find a zero of a real or complex function using the Newton-Raphson
(or secant or Halley's) method.
Find a zero of the function `func` given a nearby starting point `x0`.
The Newton-Raphson method is used if the derivative `fprime` of `func`
is provided, otherwise the secant method is used. If the second order
derivative `fprime2` of `func` is also provided, then Halley's method is
used.
If `x0` is a sequence with more than one item, then `newton` returns an
array, and `func` must be vectorized and return a sequence or array of the
same shape as its first argument. If `fprime` or `fprime2` is given, then
its return must also have the same shape.
Parameters
----------
func : callable
The function whose zero is wanted. It must be a function of a
single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...``
are extra arguments that can be passed in the `args` parameter.
x0 : float, sequence, or ndarray
An initial estimate of the zero that should be somewhere near the
actual zero. If not scalar, then `func` must be vectorized and return
a sequence or array of the same shape as its first argument.
fprime : callable, optional
The derivative of the function when available and convenient. If it
is None (default), then the secant method is used.
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value. If `func` is complex-valued,
a larger `tol` is recommended as both the real and imaginary parts
of `x` contribute to ``|x - x0|``.
maxiter : int, optional
Maximum number of iterations.
fprime2 : callable, optional
The second order derivative of the function when available and
convenient. If it is None (default), then the normal Newton-Raphson
or the secant method is used. If it is not None, then Halley's method
is used.
x1 : float, optional
Another estimate of the zero that should be somewhere near the
actual zero. Used if `fprime` is not provided.
rtol : float, optional
Tolerance (relative) for termination.
full_output : bool, optional
If `full_output` is False (default), the root is returned.
If True and `x0` is scalar, the return value is ``(x, r)``, where ``x``
is the root and ``r`` is a `RootResults` object.
If True and `x0` is non-scalar, the return value is ``(x, converged,
zero_der)`` (see Returns section for details).
disp : bool, optional
If True, raise a RuntimeError if the algorithm didn't converge, with
the error message containing the number of iterations and current
function value. Otherwise, the convergence status is recorded in a
`RootResults` return object.
Ignored if `x0` is not scalar.
*Note: this has little to do with displaying, however,
the `disp` keyword cannot be renamed for backwards compatibility.*
Returns
-------
root : float, sequence, or ndarray
Estimated location where function is zero.
r : `RootResults`, optional
Present if ``full_output=True`` and `x0` is scalar.
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
converged : ndarray of bool, optional
Present if ``full_output=True`` and `x0` is non-scalar.
For vector functions, indicates which elements converged successfully.
zero_der : ndarray of bool, optional
Present if ``full_output=True`` and `x0` is non-scalar.
For vector functions, indicates which elements had a zero derivative.
See Also
--------
brentq, brenth, ridder, bisect
fsolve : find zeros in N dimensions.
Notes
-----
The convergence rate of the Newton-Raphson method is quadratic,
the Halley method is cubic, and the secant method is
sub-quadratic. This means that if the function is well-behaved
the actual error in the estimated zero after the nth iteration
is approximately the square (cube for Halley) of the error
after the (n-1)th step. However, the stopping criterion used
here is the step size and there is no guarantee that a zero
has been found. Consequently, the result should be verified.
Safer algorithms are brentq, brenth, ridder, and bisect,
but they all require that the root first be bracketed in an
interval where the function changes sign. The brentq algorithm
is recommended for general use in one dimensional problems
when such an interval has been found.
When `newton` is used with arrays, it is best suited for the following
types of problems:
* The initial guesses, `x0`, are all relatively the same distance from
the roots.
* Some or all of the extra arguments, `args`, are also arrays so that a
class of similar problems can be solved together.
* The size of the initial guesses, `x0`, is larger than O(100) elements.
Otherwise, a naive loop may perform as well or better than a vector.
Examples
--------
>>> from scipy import optimize
>>> import matplotlib.pyplot as plt
>>> def f(x):
... return (x**3 - 1) # only one real root at x = 1
``fprime`` is not provided, use the secant method:
>>> root = optimize.newton(f, 1.5)
>>> root
1.0000000000000016
>>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
>>> root
1.0000000000000016
Only ``fprime`` is provided, use the Newton-Raphson method:
>>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
>>> root
1.0
Both ``fprime2`` and ``fprime`` are provided, use Halley's method:
>>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
... fprime2=lambda x: 6 * x)
>>> root
1.0
When we want to find zeros for a set of related starting values and/or
function parameters, we can provide both of those as an array of inputs:
>>> f = lambda x, a: x**3 - a
>>> fder = lambda x, a: 3 * x**2
>>> rng = np.random.default_rng()
>>> x = rng.standard_normal(100)
>>> a = np.arange(-50, 50)
>>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200)
The above is the equivalent of solving for each value in ``(x, a)``
separately in a for-loop, just faster:
>>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,))
... for x0, a0 in zip(x, a)]
>>> np.allclose(vec_res, loop_res)
True
Plot the results found for all values of ``a``:
>>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(a, analytical_result, 'o')
>>> ax.plot(a, vec_res, '.')
>>> ax.set_xlabel('$a$')
>>> ax.set_ylabel('$x$ where $f(x, a)=0$')
>>> plt.show()
"""
if tol <= 0:
raise ValueError("tol too small (%g <= 0)" % tol)
maxiter = operator.index(maxiter)
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
if np.size(x0) > 1:
return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
full_output)
# Convert to float (don't use float(x0); this works also for complex x0)
p0 = 1.0 * x0
funcalls = 0
if fprime is not None:
# Newton-Raphson method
for itr in range(maxiter):
# first evaluate fval
fval = func(p0, *args)
funcalls += 1
# If fval is 0, a root has been found, then terminate
if fval == 0:
return _results_select(
full_output, (p0, funcalls, itr, _ECONVERGED))
fder = fprime(p0, *args)
funcalls += 1
if fder == 0:
msg = "Derivative was zero."
if disp:
msg += (
" Failed to converge after %d iterations, value is %s."
% (itr + 1, p0))
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
return _results_select(
full_output, (p0, funcalls, itr + 1, _ECONVERR))
newton_step = fval / fder
if fprime2:
fder2 = fprime2(p0, *args)
funcalls += 1
# Halley's method:
# newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)
# Only do it if denominator stays close enough to 1
# Rationale: If 1-adj < 0, then Halley sends x in the
# opposite direction to Newton. Doesn't happen if x is close
# enough to root.
adj = newton_step * fder2 / fder / 2
if np.abs(adj) < 1:
newton_step /= 1.0 - adj
p = p0 - newton_step
if np.isclose(p, p0, rtol=rtol, atol=tol):
return _results_select(
full_output, (p, funcalls, itr + 1, _ECONVERGED))
p0 = p
else:
# Secant method
if x1 is not None:
if x1 == x0:
raise ValueError("x1 and x0 must be different")
p1 = x1
else:
eps = 1e-4
p1 = x0 * (1 + eps)
p1 += (eps if p1 >= 0 else -eps)
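        # At this point p0 and p1 are two distinct starting points: p1 was
        # either supplied as x1 or obtained by perturbing x0 by a small
        # relative amount plus a sign-aware absolute offset, which is all the
        # secant iteration below needs.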
q0 = func(p0, *args)
funcalls += 1
q1 = func(p1, *args)
funcalls += 1
if abs(q1) < abs(q0):
p0, p1, q0, q1 = p1, p0, q1, q0
for itr in range(maxiter):
if q1 == q0:
if p1 != p0:
msg = "Tolerance of %s reached." % (p1 - p0)
if disp:
msg += (
" Failed to converge after %d iterations, value is %s."
% (itr + 1, p1))
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
p = (p1 + p0) / 2.0
return _results_select(
full_output, (p, funcalls, itr + 1, _ECONVERGED))
else:
if abs(q1) > abs(q0):
p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)
else:
p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)
if np.isclose(p, p1, rtol=rtol, atol=tol):
return _results_select(
full_output, (p, funcalls, itr + 1, _ECONVERGED))
p0, q0 = p1, q1
p1 = p
q1 = func(p1, *args)
funcalls += 1
if disp:
msg = ("Failed to converge after %d iterations, value is %s."
% (itr + 1, p))
raise RuntimeError(msg)
return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR))
def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):
"""
A vectorized version of Newton, Halley, and secant methods for arrays.
Do not use this method directly. This method is called from `newton`
when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
"""
# Explicitly copy `x0` as `p` will be modified inplace, but the
# user's array should not be altered.
p = np.array(x0, copy=True)
failures = np.ones_like(p, dtype=bool)
nz_der = np.ones_like(failures)
if fprime is not None:
# Newton-Raphson method
for iteration in range(maxiter):
# first evaluate fval
fval = np.asarray(func(p, *args))
# If all fval are 0, all roots have been found, then terminate
if not fval.any():
failures = fval.astype(bool)
break
fder = np.asarray(fprime(p, *args))
nz_der = (fder != 0)
# stop iterating if all derivatives are zero
if not nz_der.any():
break
# Newton step
dp = fval[nz_der] / fder[nz_der]
if fprime2 is not None:
fder2 = np.asarray(fprime2(p, *args))
dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
# only update nonzero derivatives
p = np.asarray(p, dtype=np.result_type(p, dp, np.float64))
p[nz_der] -= dp
failures[nz_der] = np.abs(dp) >= tol # items not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
else:
# Secant method
dx = np.finfo(float).eps**0.33
p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
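        # Vectorised secant start: perturb every entry of p by a relative
        # amount dx (~ cube root of machine epsilon) plus a sign-aware offset
        # to obtain the second set of points p1.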
q0 = np.asarray(func(p, *args))
q1 = np.asarray(func(p1, *args))
active = np.ones_like(p, dtype=bool)
for iteration in range(maxiter):
nz_der = (q1 != q0)
# stop iterating if all derivatives are zero
if not nz_der.any():
p = (p1 + p) / 2.0
break
# Secant Step
dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
# only update nonzero derivatives
p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64))
p[nz_der] = p1[nz_der] - dp
active_zero_der = ~nz_der & active
p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
active &= nz_der # don't assign zero derivatives again
failures[nz_der] = np.abs(dp) >= tol # not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
p1, p = p, p1
q0 = q1
q1 = np.asarray(func(p1, *args))
zero_der = ~nz_der & failures # don't include converged with zero-ders
if zero_der.any():
# Secant warnings
if fprime is None:
nonzero_dp = (p1 != p)
# non-zero dp, but infinite newton step
zero_der_nz_dp = (zero_der & nonzero_dp)
if zero_der_nz_dp.any():
rms = np.sqrt(
sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
)
warnings.warn(
'RMS of {:g} reached'.format(rms), RuntimeWarning)
# Newton or Halley warnings
else:
all_or_some = 'all' if zero_der.all() else 'some'
msg = '{:s} derivatives were zero'.format(all_or_some)
warnings.warn(msg, RuntimeWarning)
elif failures.any():
all_or_some = 'all' if failures.all() else 'some'
msg = '{0:s} failed to converge after {1:d} iterations'.format(
all_or_some, maxiter
)
if failures.all():
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
if full_output:
result = namedtuple('result', ('root', 'converged', 'zero_der'))
p = result(p, ~failures, zero_der)
return p
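# Illustrative sketch: `_array_newton` is reached through the public `newton`
# interface whenever ``np.size(x0) > 1``, e.g.
#
#     >>> import numpy as np
#     >>> from scipy import optimize
#     >>> optimize.newton(lambda x: x**2 - 2.0, np.full(3, 1.0))  # doctest: +SKIP
#     array([1.41421356, 1.41421356, 1.41421356])
#
# (the exact printed formatting may differ between versions).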
def bisect(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find root of a function within an interval using bisection.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
Slow but sure.
Parameters
----------
f : function
Python function returning a number. `f` must be continuous, and
f(a) and f(b) must have opposite signs.
a : scalar
One end of the bracketing interval [a,b].
b : scalar
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``.
maxiter : int, optional
If convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where x is the root, and r is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise, the convergence status is recorded in a `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.bisect(f, 0, 2)
>>> root
1.0
>>> root = optimize.bisect(f, -2, 0)
>>> root
-1.0
See Also
--------
brentq, brenth, bisect, newton
fixed_point : scalar fixed-point finder
fsolve : n-dimensional root-finding
"""
if not isinstance(args, tuple):
args = (args,)
maxiter = operator.index(maxiter)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def ridder(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in an interval using Ridder's method.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : scalar
One end of the bracketing interval [a,b].
b : scalar
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``.
maxiter : int, optional
If convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise, the convergence status is recorded in any `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence.
In particular, ``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, bisect, newton : 1-D root-finding
fixed_point : scalar fixed-point finder
Notes
-----
Uses [Ridders1979]_ method to find a zero of the function `f` between the
arguments `a` and `b`. Ridders' method is faster than bisection, but not
generally as fast as the Brent routines. [Ridders1979]_ provides the
classic description and source of the algorithm. A description can also be
found in any recent edition of Numerical Recipes.
The routine used here diverges slightly from standard presentations in
order to be a bit more careful of tolerance.
References
----------
.. [Ridders1979]
Ridders, C. F. J. "A New Algorithm for Computing a
Single Root of a Real Continuous Function."
IEEE Trans. Circuits Systems 26, 979-980, 1979.
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.ridder(f, 0, 2)
>>> root
1.0
>>> root = optimize.ridder(f, -2, 0)
>>> root
-1.0
"""
if not isinstance(args, tuple):
args = (args,)
maxiter = operator.index(maxiter)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def brentq(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in a bracketing interval using Brent's method.
Uses the classic Brent's method to find a zero of the function `f` on
the sign changing interval [a , b]. Generally considered the best of the
rootfinding routines here. It is a safe version of the secant method that
uses inverse quadratic extrapolation. Brent's method combines root
bracketing, interval bisection, and inverse quadratic interpolation. It is
sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
claims convergence is guaranteed for functions computable within [a,b].
[Brent1973]_ provides the classic description of the algorithm. Another
description can be found in a recent edition of Numerical Recipes, including
[PressEtal1992]_. A third description is at
http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
understand the algorithm just by reading our code. Our code diverges a bit
from standard presentations: we choose a different formula for the
extrapolation step.
Parameters
----------
f : function
Python function returning a number. The function :math:`f`
must be continuous, and :math:`f(a)` and :math:`f(b)` must
have opposite signs.
a : scalar
One end of the bracketing interval :math:`[a, b]`.
b : scalar
The other end of the bracketing interval :math:`[a, b]`.
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. For nice functions, Brent's
method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. For nice functions, Brent's
method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
maxiter : int, optional
If convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise, the convergence status is recorded in any `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
Notes
-----
`f` must be continuous. f(a) and f(b) must have opposite signs.
Related functions fall into several classes:
multivariate local optimizers
`fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
nonlinear least squares minimizer
`leastsq`
constrained multivariate optimizers
`fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
global optimizers
`basinhopping`, `brute`, `differential_evolution`
local scalar minimizers
`fminbound`, `brent`, `golden`, `bracket`
N-D root-finding
`fsolve`
1-D root-finding
`brenth`, `ridder`, `bisect`, `newton`
scalar fixed-point finder
`fixed_point`
References
----------
.. [Brent1973]
Brent, R. P.,
*Algorithms for Minimization Without Derivatives*.
Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
.. [PressEtal1992]
Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
*Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
Section 9.3: "Van Wijngaarden-Dekker-Brent Method."
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.brentq(f, -2, 0)
>>> root
-1.0
>>> root = optimize.brentq(f, 0, 2)
>>> root
1.0
"""
if not isinstance(args, tuple):
args = (args,)
maxiter = operator.index(maxiter)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def brenth(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""Find a root of a function in a bracketing interval using Brent's
method with hyperbolic extrapolation.
A variation on the classic Brent routine to find a zero of the function f
between the arguments a and b that uses hyperbolic extrapolation instead of
inverse quadratic extrapolation. There was a paper back in the 1980's ...
f(a) and f(b) cannot have the same signs. Generally, on a par with the
brent routine, but not as heavily tested. It is a safe version of the
secant method that uses hyperbolic extrapolation. The version here is by
Chuck Harris.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : scalar
One end of the bracketing interval [a,b].
b : scalar
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. As with `brentq`, for nice
functions the method will often satisfy the above condition
with ``xtol/2`` and ``rtol/2``.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. As with `brentq`, for nice functions
the method will often satisfy the above condition with
``xtol/2`` and ``rtol/2``.
maxiter : int, optional
If convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise, the convergence status is recorded in any `RootResults`
return object.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
Examples
--------
>>> def f(x):
... return (x**2 - 1)
>>> from scipy import optimize
>>> root = optimize.brenth(f, -2, 0)
>>> root
-1.0
>>> root = optimize.brenth(f, 0, 2)
>>> root
1.0
See Also
--------
fmin, fmin_powell, fmin_cg,
fmin_bfgs, fmin_ncg : multivariate local optimizers
leastsq : nonlinear least squares minimizer
fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
basinhopping, differential_evolution, brute : global optimizers
fminbound, brent, golden, bracket : local scalar minimizers
fsolve : N-D root-finding
brentq, brenth, ridder, bisect, newton : 1-D root-finding
fixed_point : scalar fixed-point finder
"""
if not isinstance(args, tuple):
args = (args,)
maxiter = operator.index(maxiter)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
################################
# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by
# Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
# See [1]
def _notclose(fs, rtol=_rtol, atol=_xtol):
# Ensure not None, not 0, all finite, and not very close to each other
notclosefvals = (
all(fs) and all(np.isfinite(fs)) and
not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol))
for i, _f in enumerate(fs[:-1])))
return notclosefvals
def _secant(xvals, fvals):
"""Perform a secant step, taking a little care"""
# Secant has many "mathematically" equivalent formulations
# x2 = x0 - (x1 - x0)/(f1 - f0) * f0
# = x1 - (x1 - x0)/(f1 - f0) * f1
# = (-x1 * f0 + x0 * f1) / (f1 - f0)
# = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
# = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
x0, x1 = xvals[:2]
f0, f1 = fvals[:2]
if f0 == f1:
return np.nan
if np.abs(f1) > np.abs(f0):
x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
else:
x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
return x2
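# Worked example (illustrative): for f(x) = x**2 - 2 with xvals = [1.0, 2.0]
# and fvals = [-1.0, 2.0], |f1| > |f0| so the first branch applies and
# _secant returns (0.5*2 + 1)/(1 + 0.5) = 4/3, the usual secant estimate
# x1 - f1*(x1 - x0)/(f1 - f0).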
def _update_bracket(ab, fab, c, fc):
"""Update a bracket given (c, fc), return the discarded endpoints."""
fa, fb = fab
idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)
rx, rfx = ab[idx], fab[idx]
fab[idx] = fc
ab[idx] = c
return rx, rfx
def _compute_divided_differences(xvals, fvals, N=None, full=True,
forward=True):
"""Return a matrix of divided differences for the xvals, fvals pairs
DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i
    If full is False, just return the main diagonal (or last row):
f[a], f[a, b] and f[a, b, c].
If forward is False, return f[c], f[b, c], f[a, b, c]."""
if full:
if forward:
xvals = np.asarray(xvals)
else:
xvals = np.array(xvals)[::-1]
M = len(xvals)
N = M if N is None else min(N, M)
DD = np.zeros([M, N])
DD[:, 0] = fvals[:]
for i in range(1, N):
DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /
(xvals[i:] - xvals[:M - i]))
return DD
xvals = np.asarray(xvals)
dd = np.array(fvals)
row = np.array(fvals)
idx2Use = (0 if forward else -1)
dd[0] = fvals[idx2Use]
for i in range(1, len(xvals)):
denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
row = np.diff(row)[:] / denom
dd[i] = row[idx2Use]
return dd
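# Illustrative example: with xvals = [0, 1, 2], fvals = [1, 3, 9],
# full=False and forward=True, the returned divided differences are
# f[x0] = 1, f[x0, x1] = 2 and f[x0, x1, x2] = 2.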
def _interpolated_poly(xvals, fvals, x):
"""Compute p(x) for the polynomial passing through the specified locations.
Use Neville's algorithm to compute p(x) where p is the minimal degree
polynomial passing through the points xvals, fvals"""
xvals = np.asarray(xvals)
N = len(xvals)
Q = np.zeros([N, N])
D = np.zeros([N, N])
Q[:, 0] = fvals[:]
D[:, 0] = fvals[:]
for k in range(1, N):
alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
diffik = xvals[0:N - k] - xvals[k:N]
Q[k:, k] = (xvals[k:] - x) / diffik * alpha
D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
# Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
return np.sum(Q[-1, 1:]) + Q[-1, 0]
def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
"""Inverse cubic interpolation f-values -> x-values
Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
and compute x=IP(0).
"""
return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)
def _newton_quadratic(ab, fab, d, fd, k):
"""Apply Newton-Raphson like steps, using divided differences to approximate f'
ab is a real interval [a, b] containing a root,
fab holds the real values of f(a), f(b)
    d is a real number outside [a, b]
k is the number of steps to apply
"""
a, b = ab
fa, fb = fab
_, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
forward=True, full=False)
# _P is the quadratic polynomial through the 3 points
def _P(x):
# Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
return (A * (x - b) + B) * (x - a) + fa
if A == 0:
r = a - fa / B
else:
r = (a if np.sign(A) * np.sign(fa) > 0 else b)
# Apply k Newton-Raphson steps to _P(x), starting from x=r
for i in range(k):
r1 = r - _P(r) / (B + A * (2 * r - a - b))
if not (ab[0] < r1 < ab[1]):
if (ab[0] < r < ab[1]):
return r
r = sum(ab) / 2.0
break
r = r1
return r
class TOMS748Solver:
"""Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potro & Shi.
"""
_MU = 0.5
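    # _MU is the minimum relative shrinkage of the bracket per iteration:
    # if the bracket does not contract to less than _MU times its previous
    # width, `iterate` finishes the step with an extra bisection.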
_K_MIN = 1
_K_MAX = 100 # A very high value for real usage. Expect 1, 2, maybe 3.
def __init__(self):
self.f = None
self.args = None
self.function_calls = 0
self.iterations = 0
self.k = 2
# ab=[a,b] is a global interval containing a root
self.ab = [np.nan, np.nan]
# fab is function values at a, b
self.fab = [np.nan, np.nan]
self.d = None
self.fd = None
self.e = None
self.fe = None
self.disp = False
self.xtol = _xtol
self.rtol = _rtol
self.maxiter = _iter
def configure(self, xtol, rtol, maxiter, disp, k):
self.disp = disp
self.xtol = xtol
self.rtol = rtol
self.maxiter = maxiter
# Silently replace a low value of k with 1
self.k = max(k, self._K_MIN)
# Noisily replace a high value of k with self._K_MAX
if self.k > self._K_MAX:
msg = "toms748: Overriding k: ->%d" % self._K_MAX
warnings.warn(msg, RuntimeWarning)
self.k = self._K_MAX
def _callf(self, x, error=True):
"""Call the user-supplied function, update book-keeping"""
fx = self.f(x, *self.args)
self.function_calls += 1
if not np.isfinite(fx) and error:
raise ValueError("Invalid function value: f(%f) -> %s " % (x, fx))
return fx
def get_result(self, x, flag=_ECONVERGED):
r"""Package the result and statistics into a tuple."""
return (x, self.function_calls, self.iterations, flag)
def _update_bracket(self, c, fc):
return _update_bracket(self.ab, self.fab, c, fc)
def start(self, f, a, b, args=()):
r"""Prepare for the iterations."""
self.function_calls = 0
self.iterations = 0
self.f = f
self.args = args
self.ab[:] = [a, b]
if not np.isfinite(a) or np.imag(a) != 0:
raise ValueError("Invalid x value: %s " % (a))
if not np.isfinite(b) or np.imag(b) != 0:
raise ValueError("Invalid x value: %s " % (b))
fa = self._callf(a)
if not np.isfinite(fa) or np.imag(fa) != 0:
raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa))
if fa == 0:
return _ECONVERGED, a
fb = self._callf(b)
if not np.isfinite(fb) or np.imag(fb) != 0:
raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb))
if fb == 0:
return _ECONVERGED, b
if np.sign(fb) * np.sign(fa) > 0:
raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " %
(a, fa, b, fb))
self.fab[:] = [fa, fb]
return _EINPROGRESS, sum(self.ab) / 2.0
def get_status(self):
"""Determine the current status."""
a, b = self.ab[:2]
if np.isclose(a, b, rtol=self.rtol, atol=self.xtol):
return _ECONVERGED, sum(self.ab) / 2.0
if self.iterations >= self.maxiter:
return _ECONVERR, sum(self.ab) / 2.0
return _EINPROGRESS, sum(self.ab) / 2.0
def iterate(self):
"""Perform one step in the algorithm.
Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995]
"""
self.iterations += 1
eps = np.finfo(float).eps
d, fd, e, fe = self.d, self.fd, self.e, self.fe
ab_width = self.ab[1] - self.ab[0] # Need the start width below
c = None
for nsteps in range(2, self.k+2):
# If the f-values are sufficiently separated, perform an inverse
# polynomial interpolation step. Otherwise, nsteps repeats of
# an approximate Newton-Raphson step.
if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
self.fab[0], self.fab[1], fd, fe)
if self.ab[0] < c0 < self.ab[1]:
c = c0
if c is None:
c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)
fc = self._callf(c)
if fc == 0:
return _ECONVERGED, c
# re-bracket
e, fe = d, fd
d, fd = self._update_bracket(c, fc)
# u is the endpoint with the smallest f-value
uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
u, fu = self.ab[uix], self.fab[uix]
_, A = _compute_divided_differences(self.ab, self.fab,
forward=(uix == 0), full=False)
c = u - 2 * fu / A
if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
c = sum(self.ab) / 2.0
else:
if np.isclose(c, u, rtol=eps, atol=0):
# c didn't change (much).
# Either because the f-values at the endpoints have vastly
# differing magnitudes, or because the root is very close to
# that endpoint
frs = np.frexp(self.fab)[1]
if frs[uix] < frs[1 - uix] - 50: # Differ by more than 2**50
c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
else:
# Make a bigger adjustment, about the
# size of the requested tolerance.
mm = (1 if uix == 0 else -1)
adj = mm * np.abs(c) * self.rtol + mm * self.xtol
c = u + adj
if not self.ab[0] < c < self.ab[1]:
c = sum(self.ab) / 2.0
fc = self._callf(c)
if fc == 0:
return _ECONVERGED, c
e, fe = d, fd
d, fd = self._update_bracket(c, fc)
# If the width of the new interval did not decrease enough, bisect
if self.ab[1] - self.ab[0] > self._MU * ab_width:
e, fe = d, fd
z = sum(self.ab) / 2.0
fz = self._callf(z)
if fz == 0:
return _ECONVERGED, z
d, fd = self._update_bracket(z, fz)
# Record d and e for next iteration
self.d, self.fd = d, fd
self.e, self.fe = e, fe
status, xn = self.get_status()
return status, xn
def solve(self, f, a, b, args=(),
xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
r"""Solve f(x) = 0 given an interval containing a zero."""
self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
status, xn = self.start(f, a, b, args)
if status == _ECONVERGED:
return self.get_result(xn)
# The first step only has two x-values.
c = _secant(self.ab, self.fab)
if not self.ab[0] < c < self.ab[1]:
c = sum(self.ab) / 2.0
fc = self._callf(c)
if fc == 0:
return self.get_result(c)
self.d, self.fd = self._update_bracket(c, fc)
self.e, self.fe = None, None
self.iterations += 1
while True:
status, xn = self.iterate()
if status == _ECONVERGED:
return self.get_result(xn)
if status == _ECONVERR:
fmt = "Failed to converge after %d iterations, bracket is %s"
if disp:
msg = fmt % (self.iterations + 1, self.ab)
raise RuntimeError(msg)
return self.get_result(xn, _ECONVERR)
def toms748(f, a, b, args=(), k=1,
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a zero using TOMS Algorithm 748 method.
    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
    zero of the function `f` on the interval `[a, b]`, where `f(a)` and
`f(b)` must have opposite signs.
It uses a mixture of inverse cubic interpolation and
"Newton-quadratic" steps. [APS1995].
Parameters
----------
f : function
Python function returning a scalar. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)` must
        have opposite signs.
a : scalar,
lower boundary of the search interval
b : scalar,
upper boundary of the search interval
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``f(x, *args)``.
k : int, optional
The number of Newton quadratic steps to perform each
iteration. ``k>=1``.
xtol : scalar, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : scalar, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : int, optional
If convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Otherwise, the convergence status is recorded in the `RootResults`
return object.
Returns
-------
x0 : float
Approximate Zero of `f`
r : `RootResults` (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, ridder, bisect, newton
fsolve : find zeroes in N dimensions.
Notes
-----
`f` must be continuous.
Algorithm 748 with ``k=2`` is asymptotically the most efficient
algorithm known for finding roots of a four times continuously
differentiable function.
In contrast with Brent's algorithm, which may only decrease the length of
the enclosing bracket on the last step, Algorithm 748 decreases it each
iteration with the same asymptotic efficiency as it finds the root.
    For easy statement of efficiency indices, assume that `f` has 4
    continuous derivatives.
    For ``k=1``, the convergence order is at least 2.7, and with
    asymptotically about 2 function evaluations per iteration, the
    efficiency index is approximately 1.65.
For ``k=2``, the order is about 4.6 with asymptotically 3 function
evaluations per iteration, and the efficiency index 1.66.
For higher values of `k`, the efficiency index approaches
the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
usually appropriate.
References
----------
.. [APS1995]
Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
*Algorithm 748: Enclosing Zeros of Continuous Functions*,
       ACM Trans. Math. Softw. Volume 21, Issue 3 (1995)
doi = {10.1145/210089.210111}
Examples
--------
>>> def f(x):
... return (x**3 - 1) # only one real root at x = 1
>>> from scipy import optimize
>>> root, results = optimize.toms748(f, 0, 2, full_output=True)
>>> root
1.0
>>> results
converged: True
flag: 'converged'
function_calls: 11
iterations: 5
root: 1.0
"""
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol / 4:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
maxiter = operator.index(maxiter)
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
if not np.isfinite(a):
raise ValueError("a is not finite %s" % a)
if not np.isfinite(b):
raise ValueError("b is not finite %s" % b)
if a >= b:
raise ValueError("a and b are not an interval [{}, {}]".format(a, b))
if not k >= 1:
raise ValueError("k too small (%s < 1)" % k)
if not isinstance(args, tuple):
args = (args,)
solver = TOMS748Solver()
result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
maxiter=maxiter, disp=disp)
x, function_calls, iterations, flag = result
return _results_select(full_output, (x, function_calls, iterations, flag))
| bsd-3-clause |
abimannans/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
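    # The perplexity of each conditional distribution is exp(H(P_i)), with
    # the Shannon entropy H taken in nats; the expression below averages it
    # over all rows and compares to the requested perplexity.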
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
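    # check_grad returns the norm of the difference between the analytic
    # gradient and a finite-difference approximation; it should be close to 0.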
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
MTgeophysics/mtpy | legacy/modem_new.py | 1 | 313405 | #!/usr/bin/env python
"""
==================
ModEM
==================
# Generate data file for ModEM
# by Paul Soeffky 2013
# revised by LK 2014
# revised by JP 2014
# edited by AK 2016
"""
import os
import matplotlib.cm as cm
import matplotlib.colorbar as mcb
import matplotlib.colors as colors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.widgets as widgets
import numpy as np
import scipy.interpolate as spi
import scipy.stats as stats
from matplotlib.colors import Normalize
from matplotlib.patches import Ellipse
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import mtpy.analysis.pt as mtpt
import mtpy.core.mt as mt
import mtpy.core.z as mtz
import mtpy.imaging.mtcolors as mtcl
import mtpy.imaging.mtplottools as mtplottools
import mtpy.modeling.ws3dinv as ws
import mtpy.utils.exceptions as mtex
import mtpy.utils.gis_tools
try:
from evtk.hl import gridToVTK, pointsToVTK
except ImportError:
    print ('If you want to write a vtk file for 3d viewing, you need to '
           'download and install evtk from https://bitbucket.org/pauloh/pyevtk')
epsg_dict = {28350:['+proj=utm +zone=50 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',50],
28351:['+proj=utm +zone=51 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',51],
28352:['+proj=utm +zone=52 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',52],
28353:['+proj=utm +zone=53 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',53],
28354:['+proj=utm +zone=54 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',54],
28355:['+proj=utm +zone=55 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',55],
28356:['+proj=utm +zone=56 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',56],
3112:['+proj=lcc +lat_1=-18 +lat_2=-36 +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',0],
4326:['+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs',0],
4204:['+proj=longlat +ellps=intl +no_defs', 0]}
#==============================================================================
class Data(object):
"""
Data will read and write .dat files for ModEM and convert a WS data file
to ModEM format.
    .. note:: the data is interpolated onto the given periods such that all
stations invert for the same periods. The interpolation is
a linear interpolation of each of the real and imaginary parts
of the impedance tensor and induction tensor.
See mtpy.core.mt.MT.interpolate for more details
Arguments
------------
**edi_list** : list
list of full paths to .edi files you want to invert for
====================== ====================================================
Attributes/Key Words Description
====================== ====================================================
_dtype internal variable defining the data type of
data_array
_t_shape internal variable defining shape of tipper array in
_dtype
_z_shape internal variable defining shape of Z array in
_dtype
    center_position        (east, north, elev) for center point of station
array. All stations are relative to this location
for plotting purposes.
comp_index_dict dictionary for index values of component of Z and T
station_locations numpy.ndarray structured to store station
location values. Keys are:
* station --> station name
* east --> UTM east (m)
* north --> UTM north (m)
* lat --> latitude in decimal degrees
* lon --> longitude in decimal degrees
* elev --> elevation (m)
* zone --> UTM zone
* rel_east -- > relative east location to
center_position (m)
* rel_north --> relative north location to
center_position (m)
data_array numpy.ndarray (num_stations) structured to store
data. keys are:
* station --> station name
* lat --> latitude in decimal degrees
* lon --> longitude in decimal degrees
* elev --> elevation (m)
* rel_east -- > relative east location to
center_position (m)
* rel_north --> relative north location to
center_position (m)
* east --> UTM east (m)
* north --> UTM north (m)
* zone --> UTM zone
* z --> impedance tensor array with shape
(num_freq, 2, 2)
* z_err --> impedance tensor error array with
shape (num_freq, 2, 2)
* tip --> Tipper array with shape
(num_freq, 1, 2)
                           * tip_err --> Tipper error array with shape
(num_freq, 1, 2)
data_fn full path to data file
data_period_list period list from all the data
edi_list list of full paths to edi files
error_egbert percentage to multiply sqrt(Z_xy*Zyx) by.
*default* is 3 as prescribed by Egbert & Kelbert
error_floor percentage to set the error floor at, anything below
this number will be set to error_floor.
*default* is 10
error_tipper absolute tipper error, all tipper error will be
set to this value unless you specify error_type as
'floor' or 'floor_egbert'.
*default* is .05 for 5%
error_type [ 'floor' | 'value' | 'egbert' ]
*default* is 'egbert'
* 'floor' sets the error floor to error_floor
* 'value' sets error to error_value
* 'egbert' sets error to
error_egbert * sqrt(abs(zxy*zyx))
* 'floor_egbert' sets error floor to
error_egbert * sqrt(abs(zxy*zyx))
error_value percentage to multiply Z by to set error
*default* is 5 for 5% of Z as error
fn_basename basename of data file. *default* is 'ModEM_Data.dat'
header_strings strings for header of data file following the format
outlined in the ModEM documentation
    inv_comp_dict          dictionary of inversion components
inv_mode inversion mode, options are: *default* is '1'
* '1' --> for 'Full_Impedance' and
'Full_Vertical_Components'
* '2' --> 'Full_Impedance'
* '3' --> 'Off_Diagonal_Impedance' and
'Full_Vertical_Components'
* '4' --> 'Off_Diagonal_Impedance'
* '5' --> 'Full_Vertical_Components'
* '6' --> 'Full_Interstation_TF'
* '7' --> 'Off_Diagonal_Rho_Phase'
inv_mode_dict dictionary for inversion modes
max_num_periods maximum number of periods
mt_dict dictionary of mtpy.core.mt.MT objects with keys
being station names
period_dict dictionary of period index for period_list
period_list list of periods to invert for
period_max maximum value of period to invert for
period_min minimum value of period to invert for
rotate_angle Angle to rotate data to assuming 0 is N and E is 90
save_path path to save data file to
units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z
*default* is [mV/km]/[nT]
wave_sign [ + | - ] sign of time dependent wave.
*default* is '+' as positive downwards.
====================== ====================================================
========================== ================================================
Methods Description
========================== ================================================
    convert_ws3dinv_data_file convert a ws3dinv file to ModEM format,
**Note** this doesn't include tipper data and
you need a station location file like the one
output by mtpy.modeling.ws3dinv
get_data_from_edi get data from given .edi files and fill
attributes accordingly
get_mt_dict get a dictionary of mtpy.core.mt.MT objects
with keys being station names
get_period_list get a list of periods to invert for
get_station_locations get station locations and relative locations
filling in station_locations
read_data_file read in a ModEM data file and fill attributes
data_array, station_locations, period_list, mt_dict
write_data_file write a ModEM data file
========================== ================================================
:Example 1 --> create inversion period list: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
        >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\
max_num_periods=12)
>>> md.write_data_file(save_path=r"/home/modem/inv1")
    :Example 2 --> set inversion period list from data: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
        >>> md = modem.Data(edi_list)
>>> #get period list from an .edi file
>>> mt_obj1 = modem.mt.MT(edi_list[0])
>>> inv_period_list = 1./mt_obj1.Z.freq
>>> #invert for every third period in inv_period_list
>>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))]
>>> md.period_list = inv_period_list
>>> md.write_data_file(save_path=r"/home/modem/inv1")
:Example 3 --> change error values: ::
        >>> import mtpy.modeling.modem as modem
        >>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.error_type = 'floor'
>>> mdr.error_floor = 10
>>> mdr.error_tipper = .03
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 4 --> change inversion type: ::
        >>> import mtpy.modeling.modem as modem
        >>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.inv_mode = '3'
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 5 --> create mesh first then data file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
                        if edi.find('.edi') > 0]
>>> #2) make a grid from the stations themselves with 200m cell spacing
        >>> mmesh = modem.Model(edi_list=edi_list, cell_size_east=200,
        ...                     cell_size_north=200)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> mmesh.plot_mesh()
>>> # all is good write the mesh file
>>> mmesh.write_model_file(save_path=r"/home/modem/Inv1")
>>> # create data file
        >>> md = modem.Data(edi_list, station_locations=mmesh.station_locations)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
:Example 6 --> rotate data: ::
>>> md.rotation_angle = 60
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
>>> # or
>>> md.write_data_file(save_path=r"/home/modem/Inv1", \
rotation_angle=60)
"""
def __init__(self, edi_list=None, **kwargs):
self.edi_list = edi_list
self.error_type = kwargs.pop('error_type', 'egbert')
self.error_floor = kwargs.pop('error_floor', 5.0)
self.error_value = kwargs.pop('error_value', 5.0)
self.error_egbert = kwargs.pop('error_egbert', 3.0)
self.error_tipper = kwargs.pop('error_tipper', .05)
self.wave_sign_impedance = kwargs.pop('wave_sign_impedance', '+')
self.wave_sign_tipper = kwargs.pop('wave_sign_tipper', '+')
self.units = kwargs.pop('units', '[mV/km]/[nT]')
self.inv_mode = kwargs.pop('inv_mode', '1')
self.period_list = kwargs.pop('period_list', None)
self.period_step = kwargs.pop('period_step', 1)
self.period_min = kwargs.pop('period_min', None)
self.period_max = kwargs.pop('period_max', None)
self.period_buffer = kwargs.pop('period_buffer', None)
self.max_num_periods = kwargs.pop('max_num_periods', None)
self.data_period_list = None
self.fn_basename = kwargs.pop('fn_basename', 'ModEM_Data.dat')
self.save_path = kwargs.pop('save_path', os.getcwd())
self.formatting = kwargs.pop('format', '1')
self._rotation_angle = kwargs.pop('rotation_angle', 0.0)
self._set_rotation_angle(self._rotation_angle)
self._station_locations = None
self.center_position = np.array([0.0, 0.0])
self.epsg = kwargs.pop('epsg',None)
self.data_array = None
self.mt_dict = None
self.data_fn = kwargs.pop('data_fn','ModEM_Data.dat')
self._z_shape = (1, 2, 2)
self._t_shape = (1, 1, 2)
self._dtype = [('station', '|S10'),
('lat', np.float),
('lon', np.float),
('elev', np.float),
('rel_east', np.float),
('rel_north', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('z', (np.complex, self._z_shape)),
('z_err', (np.complex, self._z_shape)),
('tip', (np.complex, self._t_shape)),
('tip_err', (np.complex, self._t_shape))]
self.inv_mode_dict = {'1':['Full_Impedance', 'Full_Vertical_Components'],
'2':['Full_Impedance'],
'3':['Off_Diagonal_Impedance',
'Full_Vertical_Components'],
                              '4':['Off_Diagonal_Impedance'],
'5':['Full_Vertical_Components'],
'6':['Full_Interstation_TF'],
'7':['Off_Diagonal_Rho_Phase']}
self.inv_comp_dict = {'Full_Impedance':['zxx', 'zxy', 'zyx', 'zyy'],
'Off_Diagonal_Impedance':['zxy', 'zyx'],
'Full_Vertical_Components':['tx', 'ty']}
self.comp_index_dict = {'zxx': (0, 0), 'zxy':(0, 1), 'zyx':(1, 0),
'zyy':(1, 1), 'tx':(0, 0), 'ty':(0, 1)}
self.header_strings = \
['# Created using MTpy error {0} of {1:.0f}%, data rotated {2:.1f} deg clockwise from N\n'.format(
self.error_type, self.error_floor, self._rotation_angle),
'# Period(s) Code GG_Lat GG_Lon X(m) Y(m) Z(m) Component Real Imag Error\n']
#size of a utm grid
self._utm_grid_size_north = 888960.0
self._utm_grid_size_east = 640000.0
self._utm_cross = False
self._utm_ellipsoid = 23
def _set_dtype(self, z_shape, t_shape):
"""
reset dtype
"""
self._z_shape = z_shape
self._t_shape = t_shape
self._dtype = [('station', '|S10'),
('lat', np.float),
('lon', np.float),
('elev', np.float),
('rel_east', np.float),
('rel_north', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('z', (np.complex, self._z_shape)),
('z_err', (np.complex, self._z_shape)),
('tip', (np.complex, self._t_shape)),
('tip_err', (np.complex, self._t_shape))]
def _set_header_string(self):
"""
        reset the header string for the file
"""
h_str = '# Created using MTpy error {0} of {1:.0f}%, data rotated {2:.1f}_deg clockwise from N\n'
if self.error_type == 'egbert':
self.header_strings[0] = h_str.format(self.error_type,
self.error_egbert,
self._rotation_angle)
elif self.error_type == 'floor':
self.header_strings[0] = h_str.format(self.error_type,
self.error_floor,
self._rotation_angle)
elif self.error_type == 'value':
self.header_strings[0] = h_str.format(self.error_type,
self.error_value,
self._rotation_angle)
def get_mt_dict(self):
"""
get mt_dict from edi file list
"""
if self.edi_list is None:
raise ModEMError('edi_list is None, please input a list of '
'.edi files containing the full path')
if len(self.edi_list) == 0:
raise ModEMError('edi_list is empty, please input a list of '
'.edi files containing the full path' )
self.mt_dict = {}
for edi in self.edi_list:
mt_obj = mt.MT(edi)
self.mt_dict[mt_obj.station] = mt_obj
def get_relative_station_locations(self):
"""
get station locations from edi files
"""
utm_zones_dict = {'M':9, 'L':8, 'K':7, 'J':6, 'H':5, 'G':4, 'F':3,
'E':2, 'D':1, 'C':0, 'N':10, 'P':11, 'Q':12, 'R':13,
'S':14, 'T':15, 'U':16, 'V':17, 'W':18, 'X':19}
# get center position of the stations in lat and lon
self.center_position[0] = self.data_array['lat'].mean()
self.center_position[1] = self.data_array['lon'].mean()
#--> need to convert lat and lon to east and north
for c_arr in self.data_array:
if c_arr['lat'] != 0.0 and c_arr['lon'] != 0.0:
c_arr['zone'], c_arr['east'], c_arr['north'] = \
mtpy.utils.gis_tools.ll_to_utm(self._utm_ellipsoid,
c_arr['lat'],
c_arr['lon'])
#--> need to check to see if all stations are in the same zone
utm_zone_list = list(set(self.data_array['zone']))
        # if there is more than one zone, figure out which zone is the odd ball
utm_zone_dict = dict([(utmzone, 0) for utmzone in utm_zone_list])
if len(utm_zone_list) != 1:
self._utm_cross = True
for c_arr in self.data_array:
utm_zone_dict[c_arr['zone']] += 1
            # flip keys and values so the key is the number of stations in
            # that zone and the value is the utm zone
utm_zone_dict = dict([(utm_zone_dict[key], key)
for key in utm_zone_dict.keys()])
#get the main utm zone as the one with the most stations in it
main_utm_zone = utm_zone_dict[max(utm_zone_dict.keys())]
#Get a list of index values where utm zones are not the
#same as the main zone
diff_zones = np.where(self.data_array['zone'] != main_utm_zone)[0]
for c_index in diff_zones:
c_arr = self.data_array[c_index]
c_utm_zone = c_arr['zone']
print '{0} utm_zone is {1} and does not match {2}'.format(
c_arr['station'], c_arr['zone'], main_utm_zone)
zone_shift = 1-abs(utm_zones_dict[c_utm_zone[-1]]-\
utm_zones_dict[main_utm_zone[-1]])
#--> check to see if the zone is in the same latitude
#if odd ball zone is north of main zone, add 888960 m
if zone_shift > 1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> adding {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
                       'stations.')
c_arr['north'] += north_shift
            #if odd ball zone is south of main zone, subtract 888960 m
elif zone_shift < -1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> subtracting {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
                       'stations.')
c_arr['north'] -= north_shift
#--> if zone is shifted east or west
if int(c_utm_zone[0:-1]) > int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> adding {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
                       'stations.')
c_arr['east'] += east_shift
elif int(c_utm_zone[0:-1]) < int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> subtracting {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
                       'stations.')
c_arr['east'] -= east_shift
#remove the average distance to get coordinates in a relative space
self.data_array['rel_east'] = self.data_array['east']-\
self.data_array['east'].mean()
self.data_array['rel_north'] = self.data_array['north']-\
self.data_array['north'].mean()
#--> rotate grid if necessary
#to do this rotate the station locations because ModEM assumes the
#input mesh is a lateral grid.
#needs to be 90 - because North is assumed to be 0 but the rotation
#matrix assumes that E is 0.
if self.rotation_angle != 0:
cos_ang = np.cos(np.deg2rad(self.rotation_angle))
sin_ang = np.sin(np.deg2rad(self.rotation_angle))
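            # 2x2 matrix that rotates the (east, north) coordinate pairs
            # clockwise by rotation_angle degrees, measured from north.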
rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
[-sin_ang, cos_ang]]))
coords = np.array([self.data_array['rel_east'],
self.data_array['rel_north']])
#rotate the relative station locations
new_coords = np.array(np.dot(rot_matrix, coords))
self.data_array['rel_east'][:] = new_coords[0, :]
self.data_array['rel_north'][:] = new_coords[1, :]
print 'Rotated stations by {0:.1f} deg clockwise from N'.format(
self.rotation_angle)
#translate the stations so they are relative to 0,0
east_center = (self.data_array['rel_east'].max()-
np.abs(self.data_array['rel_east'].min()))/2
north_center = (self.data_array['rel_north'].max()-
np.abs(self.data_array['rel_north'].min()))/2
#remove the average distance to get coordinates in a relative space
self.data_array['rel_east'] -= east_center
self.data_array['rel_north'] -= north_center
def get_period_list(self):
"""
make a period list to invert for
"""
if self.mt_dict is None:
self.get_mt_dict()
if self.period_list is not None:
print '-'*50
print 'Inverting for periods:'
for per in self.period_list:
print ' {0:<12.6f}'.format(per)
print '-'*50
return
data_period_list = []
for s_key in sorted(self.mt_dict.keys()):
mt_obj = self.mt_dict[s_key]
data_period_list.extend(list(1./mt_obj.Z.freq))
self.data_period_list = np.array(sorted(list(set(data_period_list)),
reverse=False))
if self.period_min is not None:
if self.period_max is None:
raise ModEMError('Need to input period_max')
if self.period_max is not None:
if self.period_min is None:
raise ModEMError('Need to input period_min')
if self.period_min is not None and self.period_max is not None:
if self.max_num_periods is None:
raise ModEMError('Need to input number of periods to use')
min_index = np.where(self.data_period_list >= self.period_min)[0][0]
max_index = np.where(self.data_period_list <= self.period_max)[0][-1]
pmin = np.log10(self.data_period_list[min_index])
pmax = np.log10(self.data_period_list[max_index])
self.period_list = np.logspace(pmin, pmax, num=self.max_num_periods)
print '-'*50
print 'Inverting for periods:'
for per in self.period_list:
print ' {0:<12.6f}'.format(per)
print '-'*50
if self.period_list is None:
raise ModEMError('Need to input period_min, period_max, '
'max_num_periods or a period_list')
def _set_rotation_angle(self, rotation_angle):
"""
on set rotation angle rotate mt_dict and data_array,
"""
if self._rotation_angle == rotation_angle:
return
print 'Changing rotation angle from {0:.1f} to {1:.1f}'.format(
self._rotation_angle, rotation_angle)
self._rotation_angle = -self._rotation_angle+rotation_angle
if self.rotation_angle == 0:
return
print 'Changing rotation angle from {0:.1f} to {1:.1f}'.format(
self._rotation_angle, rotation_angle)
self._rotation_angle = rotation_angle
if self.data_array is None:
return
if self.mt_dict is None:
return
for mt_key in sorted(self.mt_dict.keys()):
mt_obj = self.mt_dict[mt_key]
mt_obj.Z.rotate(self._rotation_angle)
mt_obj.Tipper.rotate(self._rotation_angle)
print 'Data rotated to align with {0:.1f} deg clockwise from N'.format(
self._rotation_angle)
print '*'*70
print ' If you want to rotate station locations as well use the'
print ' command Data.get_relative_station_locations() '
print ' if stations have not already been rotated in Model'
print '*'*70
self._fill_data_array()
def _get_rotation_angle(self):
return self._rotation_angle
rotation_angle = property(fget=_get_rotation_angle,
fset=_set_rotation_angle,
doc="""Rotate data assuming N=0, E=90""")
def _fill_data_array(self):
"""
fill the data array from mt_dict
"""
if self.period_list is None:
self.get_period_list()
ns = len(self.mt_dict.keys())
nf = len(self.period_list)
d_array = False
if self.data_array is not None:
d_arr_copy = self.data_array.copy()
d_array = True
self._set_dtype((nf, 2, 2), (nf, 1, 2))
self.data_array = np.zeros(ns, dtype=self._dtype)
rel_distance = True
for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
mt_obj = self.mt_dict[s_key]
if d_array is True:
try:
d_index = np.where(d_arr_copy['station'] == s_key)[0][0]
self.data_array[ii]['station'] = s_key
self.data_array[ii]['lat'] = d_arr_copy[d_index]['lat']
self.data_array[ii]['lon'] = d_arr_copy[d_index]['lon']
self.data_array[ii]['east'] = d_arr_copy[d_index]['east']
self.data_array[ii]['north'] = d_arr_copy[d_index]['north']
self.data_array[ii]['elev'] = d_arr_copy[d_index]['elev']
self.data_array[ii]['rel_east'] = d_arr_copy[d_index]['rel_east']
self.data_array[ii]['rel_north'] = d_arr_copy[d_index]['rel_north']
except IndexError:
print 'Could not find {0} in data_array'.format(s_key)
else:
self.data_array[ii]['station'] = mt_obj.station
self.data_array[ii]['lat'] = mt_obj.lat
self.data_array[ii]['lon'] = mt_obj.lon
self.data_array[ii]['east'] = mt_obj.east
self.data_array[ii]['north'] = mt_obj.north
self.data_array[ii]['elev'] = mt_obj.elev
try:
self.data_array[ii]['rel_east'] = mt_obj.grid_east
self.data_array[ii]['rel_north'] = mt_obj.grid_north
rel_distance = False
except AttributeError:
pass
# interpolate each station onto the period list
# check bounds of period list
interp_periods = self.period_list[np.where(
(self.period_list >= 1./mt_obj.Z.freq.max()) &
(self.period_list <= 1./mt_obj.Z.freq.min()))]
# if specified, apply a buffer so that interpolation doesn't stretch too far over periods
if type(self.period_buffer) in [float,int]:
interp_periods_new = []
dperiods = 1./mt_obj.Z.freq
for iperiod in interp_periods:
# find nearest data period
difference = np.abs(iperiod-dperiods)
nearestdperiod = dperiods[difference == np.amin(difference)][0]
if max(nearestdperiod/iperiod, iperiod/nearestdperiod) < self.period_buffer:
interp_periods_new.append(iperiod)
interp_periods = np.array(interp_periods_new)
interp_z, interp_t = mt_obj.interpolate(1./interp_periods)
for kk, ff in enumerate(interp_periods):
jj = np.where(self.period_list == ff)[0][0]
self.data_array[ii]['z'][jj] = interp_z.z[kk, :, :]
self.data_array[ii]['z_err'][jj] = interp_z.z_err[kk, :, :]
if mt_obj.Tipper.tipper is not None:
self.data_array[ii]['tip'][jj] = interp_t.tipper[kk, :, :]
self.data_array[ii]['tip_err'][jj] = \
interp_t.tipper_err[kk, :, :]
if rel_distance is False:
self.get_relative_station_locations()
def _set_station_locations(self, station_locations):
"""
take a station_locations array and populate data_array
"""
if self.data_array is None:
self.get_mt_dict()
self.get_period_list()
self._fill_data_array()
for s_arr in station_locations:
try:
d_index = np.where(self.data_array['station'] ==
s_arr['station'])[0][0]
except IndexError:
print 'Could not find {0} in data_array'.format(s_arr['station'])
d_index = None
if d_index is not None:
self.data_array[d_index]['lat'] = s_arr['lat']
self.data_array[d_index]['lon'] = s_arr['lon']
self.data_array[d_index]['east'] = s_arr['east']
self.data_array[d_index]['north'] = s_arr['north']
self.data_array[d_index]['elev'] = s_arr['elev']
self.data_array[d_index]['rel_east'] = s_arr['rel_east']
self.data_array[d_index]['rel_north'] = s_arr['rel_north']
def _get_station_locations(self):
"""
extract station locations from data array
"""
if self.data_array is None:
return None
station_locations = self.data_array[['station', 'lat', 'lon',
'north', 'east', 'elev',
'rel_north', 'rel_east']]
return station_locations
station_locations = property(_get_station_locations,
_set_station_locations,
doc="""location of stations""")
# def compute_inv_error(self, comp, data_value, data_error):
# """
# compute the error from the given parameters
# """
# #compute relative error
# if comp.find('t') == 0:
# if 'floor' in self.error_type:
# abs_err = max(self.error_tipper,
# data_error)
# else:
# abs_err = self.error_tipper
# elif comp.find('z') == 0:
# if self.error_type == 'floor':
# abs_err = max(data_error,
# (self.error_floor/100.)*abs(data_value))
#
# elif self.error_type == 'value':
# abs_err = abs(data_value)*self.error_value/100.
#
# elif self.error_type == 'egbert':
# d_zxy = self.data_array[ss]['z'][ff, 0, 1]
# d_zyx = self.data_array[ss]['z'][ff, 1, 0]
# abs_err = np.sqrt(abs(d_zxy*d_zyx))*\
# self.error_egbert/100.
# elif self.error_type == 'floor_egbert':
# abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]
# d_zxy = self.data_array[ss]['z'][ff, 0, 1]
# d_zyx = self.data_array[ss]['z'][ff, 1, 0]
# if abs_err < np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.:
# abs_err = np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.
#
#
# if abs_err == 0.0:
# abs_err = 1e3
# print('''error at {0} is 0 for period {1} \n
# for {2}({3}, {4}) set to 1e3\n
# data = {5:.4e}+j{6:.4e}'''.format(
# sta, per, comp, z_ii, z_jj, zz.real,
# zz.imag))
# if self.units == 'ohm':
# abs_err /= 796.
def write_data_file(self, save_path=None, fn_basename=None,
rotation_angle=None, compute_error=True, fill=True):
"""
write data file for ModEM
will save file as save_path/fn_basename
Arguments:
------------
**save_path** : string
directory path to save data file to.
*default* is cwd
**fn_basename** : string
basename to save data file as
*default* is 'ModEM_Data.dat'
**rotation_angle** : float
angle to rotate the data by assuming N = 0,
E = 90. *default* is 0.0
Outputs:
----------
**data_fn** : string
full path to created data file
:Example: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
import mtpy.modeling.ModEM >>> md = mtpy.modeling.ModEM.Data(edi_list, period_min=.1, period_max=300,\
max_num_periods=12)
>>> md.write_data_file(save_path=r"/home/modem/inv1")
"""
if save_path is not None:
self.save_path = save_path
if fn_basename is not None:
self.fn_basename = fn_basename
self.data_fn = os.path.join(self.save_path, self.fn_basename)
self.get_period_list()
#rotate data if desired
if rotation_angle is not None:
self.rotation_angle = rotation_angle
#be sure to fill in data array
if fill is True:
self._fill_data_array()
# get relative station locations in grid coordinates
self.get_relative_station_locations()
#reset the header string to be informational
self._set_header_string()
# number of periods - subtract periods with all zero components
nper = len(np.where(np.mean(np.mean(np.mean(np.abs(self.data_array['z']),axis=0),axis=1),axis=1)>0)[0])
dlines = []
for inv_mode in self.inv_mode_dict[self.inv_mode]:
dlines.append(self.header_strings[0])
dlines.append(self.header_strings[1])
dlines.append('> {0}\n'.format(inv_mode))
if inv_mode.find('Impedance') > 0:
dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_impedance))
dlines.append('> {0}\n'.format(self.units))
elif inv_mode.find('Vertical') >=0:
dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_tipper))
dlines.append('> []\n')
dlines.append('> 0\n') #oriention, need to add at some point
dlines.append('> {0: >10.6f} {1:>10.6f}\n'.format(
self.center_position[0], self.center_position[1]))
dlines.append('> {0} {1}\n'.format(self.data_array['z'].shape[1],
self.data_array['z'].shape[0]))
for ss in range(self.data_array['z'].shape[0]):
for ff in range(self.data_array['z'].shape[1]):
for comp in self.inv_comp_dict[inv_mode]:
#index values for component with in the matrix
z_ii, z_jj = self.comp_index_dict[comp]
#get the correct key for data array according to comp
if comp.find('z') == 0:
c_key = 'z'
elif comp.find('t') == 0:
c_key = 'tip'
#get the value for that compenent at that frequency
zz = self.data_array[ss][c_key][ff, z_ii, z_jj]
if zz.real != 0.0 and zz.imag != 0.0 and \
zz.real != 1e32 and zz.imag != 1e32:
if self.formatting == '1':
per = '{0:<12.5e}'.format(self.period_list[ff])
sta = '{0:>7}'.format(self.data_array[ss]['station'])
lat = '{0:> 9.3f}'.format(self.data_array[ss]['lat'])
lon = '{0:> 9.3f}'.format(self.data_array[ss]['lon'])
eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
nor = '{0:> 12.3f}'.format(self.data_array[ss]['rel_north'])
ele = '{0:> 12.3f}'.format(self.data_array[ss]['elev'])
com = '{0:>4}'.format(comp.upper())
if self.units == 'ohm':
rea = '{0:> 14.6e}'.format(zz.real/796.)
ima = '{0:> 14.6e}'.format(zz.imag/796.)
else:
rea = '{0:> 14.6e}'.format(zz.real)
ima = '{0:> 14.6e}'.format(zz.imag)
elif self.formatting == '2':
per = '{0:<14.6e}'.format(self.period_list[ff])
sta = '{0:<10}'.format(self.data_array[ss]['station'])
lat = '{0:> 14.6f}'.format(self.data_array[ss]['lat'])
lon = '{0:> 14.6f}'.format(self.data_array[ss]['lon'])
eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
nor = '{0:> 15.3f}'.format(self.data_array[ss]['rel_north'])
ele = '{0:> 10.3f}'.format(self.data_array[ss]['elev'])
com = '{0:>12}'.format(comp.upper())
if self.units == 'ohm':
rea = '{0:> 17.6e}'.format(zz.real/796.)
ima = '{0:> 17.6e}'.format(zz.imag/796.)
else:
rea = '{0:> 17.6e}'.format(zz.real)
ima = '{0:> 17.6e}'.format(zz.imag)
if compute_error:
#compute relative error
if comp.find('t') == 0:
if 'floor' in self.error_type:
abs_err = max(self.error_tipper,
self.data_array[ss]['tip_err'][ff,0,z_ii])
else:
abs_err = self.error_tipper
elif comp.find('z') == 0:
if self.error_type == 'floor':
rel_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]/\
abs(zz)
if rel_err < self.error_floor/100.:
rel_err = self.error_floor/100.
abs_err = rel_err*abs(zz)
elif self.error_type == 'value':
abs_err = abs(zz)*self.error_value/100.
elif self.error_type == 'egbert':
d_zxy = self.data_array[ss]['z'][ff, 0, 1]
d_zyx = self.data_array[ss]['z'][ff, 1, 0]
abs_err = np.sqrt(abs(d_zxy*d_zyx))*\
self.error_egbert/100.
elif self.error_type == 'floor_egbert':
abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]
d_zxy = self.data_array[ss]['z'][ff, 0, 1]
d_zyx = self.data_array[ss]['z'][ff, 1, 0]
if abs(d_zxy) == 0.0:
d_zxy = 1E3
if abs(d_zyx) == 0.0:
d_zyx = 1e3
eg_err = np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.
if abs_err < eg_err:
abs_err = np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.
else:
pass
if abs_err == 0.0:
abs_err = 1e3
print('''error at {0} is 0 for period {1} \n
for {2}({3}, {4}) set to 1e3\n
data = {5:.4e}+j{6:.4e}'''.format(
sta, per, comp, z_ii, z_jj, zz.real,
zz.imag))
if self.units == 'ohm':
abs_err /= 796.
else:
abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj].real
if c_key.find('z') >= 0 and self.units == 'ohm':
abs_err /= 796.
abs_err = '{0:> 14.6e}'.format(abs(abs_err))
#make sure that x==north, y==east, z==+down
dline = ''.join([per, sta, lat, lon, nor, eas, ele,
com, rea, ima, abs_err, '\n'])
dlines.append(dline)
dfid = file(self.data_fn, 'w')
dfid.writelines(dlines)
dfid.close()
print 'Wrote ModEM data file to {0}'.format(self.data_fn)
def convert_ws3dinv_data_file(self, ws_data_fn, station_fn=None,
save_path=None, fn_basename=None):
"""
convert a ws3dinv data file into ModEM format
Arguments:
------------
**ws_data_fn** : string
full path to WS data file
**station_fn** : string
full path to station info file output by
mtpy.modeling.ws3dinv. Or you can create one using
mtpy.modeling.ws3dinv.WSStation
**save_path** : string
directory path to save data file to.
*default* is cwd
**fn_basename** : string
basename to save data file as
*default* is 'ModEM_Data.dat'
Outputs:
-----------
**data_fn** : string
full path to created data file
:Example: ::
import mtpy.modeling.ModEM >>> import mtpy.modeling.modem as modem
>>> mdr = mtpy.modeling.ModEM.Data()
>>> mdr.convert_ws3dinv_data_file(r"/home/ws3dinv/inv1/WSData.dat",
station_fn=r"/home/ws3dinv/inv1/WS_Station_Locations.txt")
"""
if os.path.isfile(ws_data_fn) == False:
raise ws.WSInputError('Did not find {0}, check path'.format(ws_data_fn))
if save_path is not None:
self.save_path = save_path
else:
self.save_path = os.path.dirname(ws_data_fn)
if fn_basename is not None:
self.fn_basename = fn_basename
#--> get data from data file
wsd = ws.WSData()
wsd.read_data_file(ws_data_fn, station_fn=station_fn)
ns = wsd.data['station'].shape[0]
nf = wsd.period_list.shape[0]
self.period_list = wsd.period_list.copy()
self._set_dtype((nf, 2, 2), (nf, 1, 2))
self.data_array = np.zeros(ns, dtype=self._dtype)
#--> fill data array
for ii, d_arr in enumerate(wsd.data):
self.data_array[ii]['station'] = d_arr['station']
self.data_array[ii]['rel_east'] = d_arr['east']
self.data_array[ii]['rel_north'] = d_arr['north']
self.data_array[ii]['z'][:] = d_arr['z_data']
self.data_array[ii]['z_err'][:] = d_arr['z_data_err'].real*\
d_arr['z_err_map'].real
self.data_array[ii]['station'] = d_arr['station']
self.data_array[ii]['lat'] = 0.0
self.data_array[ii]['lon'] = 0.0
self.data_array[ii]['rel_east'] = d_arr['east']
self.data_array[ii]['rel_north'] = d_arr['north']
self.data_array[ii]['elev'] = 0.0
#need to change the inversion mode to be the same as the ws_data file
if self.data_array['z'].all() == 0.0:
if self.data_array['tip'].all() == 0.0:
self.inv_mode = '4'
else:
self.inv_mode = '3'
else:
if self.data_array['tip'].all() == 0.0:
self.inv_mode = '2'
else:
self.inv_mode = '1'
#-->write file
self.write_data_file()
def read_data_file(self, data_fn=None, center_utm = None):
"""
read ModEM data file
inputs:
data_fn = full path to data file name
center_utm = option to provide real world coordinates of the center of
the grid for putting the data and model back into
utm/grid coordinates, format [east_0, north_0, z_0]
Fills attributes:
* data_array
* period_list
* mt_dict
"""
if data_fn is not None:
self.data_fn = data_fn
self.save_path = os.path.dirname(self.data_fn)
self.fn_basename = os.path.basename(self.data_fn)
if self.data_fn is None:
raise ModEMError('data_fn is None, enter a data file to read.')
elif os.path.isfile(self.data_fn) is False:
raise ModEMError('Could not find {0}, check path'.format(self.data_fn))
dfid = file(self.data_fn, 'r')
dlines = dfid.readlines()
dfid.close()
header_list = []
metadata_list = []
data_list = []
period_list = []
station_list = []
read_impedance = False
read_tipper = False
for dline in dlines:
if dline.find('#') == 0:
header_list.append(dline.strip())
elif dline.find('>') == 0:
metadata_list.append(dline[1:].strip())
if dline.lower().find('ohm') > 0:
self.units = 'ohm'
elif dline.lower().find('mv') > 0:
self.units =' [mV/km]/[nT]'
elif dline.lower().find('vertical') > 0:
read_tipper = True
read_impedance = False
elif dline.lower().find('impedance') > 0:
read_impedance = True
read_tipper = False
if dline.find('exp') > 0:
if read_impedance is True:
self.wave_sign_impedance = dline[dline.find('(')+1]
elif read_tipper is True:
self.wave_sign_tipper = dline[dline.find('(')+1]
elif len(dline[1:].strip().split()) == 2:
value_list = [float(value) for value in
dline[1:].strip().split()]
if value_list[0]%1 == 0 and value_list[1]%1 == 0:
n_periods = value_list[0]
n_stations = value_list[1]
else:
self.center_position = np.array(value_list)
else:
dline_list = dline.strip().split()
if len(dline_list) == 11:
for ii, d_str in enumerate(dline_list):
if ii != 1:
try:
dline_list[ii] = float(d_str.strip())
except ValueError:
pass
# be sure the station name is a string
else:
dline_list[ii] = d_str.strip()
period_list.append(dline_list[0])
station_list.append(dline_list[1])
data_list.append(dline_list)
#try to find rotation angle
h_list = header_list[0].split()
for hh, h_str in enumerate(h_list):
if h_str.find('_deg') > 0:
try:
self._rotation_angle = float(h_str[0:h_str.find('_deg')])
print ('Set rotation angle to {0:.1f} '.format(
self._rotation_angle)+'deg clockwise from N')
except ValueError:
pass
self.period_list = np.array(sorted(set(period_list)))
station_list = sorted(set(station_list))
#make a period dictionary to with key as period and value as index
period_dict = dict([(per, ii) for ii, per in enumerate(self.period_list)])
#--> need to sort the data into a useful fashion such that each station
# is an mt object
data_dict = {}
z_dummy = np.zeros((len(self.period_list), 2, 2), dtype='complex')
t_dummy = np.zeros((len(self.period_list), 1, 2), dtype='complex')
index_dict = {'zxx': (0, 0), 'zxy':(0, 1), 'zyx':(1, 0), 'zyy':(1, 1),
'tx':(0, 0), 'ty':(0, 1)}
#dictionary for true false if station data (lat, lon, elev, etc)
#has been filled already so we don't rewrite it each time
tf_dict = {}
for station in station_list:
data_dict[station] = mt.MT()
data_dict[station].Z = mtz.Z(z_array=z_dummy.copy(),
z_err_array=z_dummy.copy().real,
freq=1./self.period_list)
data_dict[station].Tipper = mtz.Tipper(tipper_array=t_dummy.copy(),
tipper_err_array=t_dummy.copy().real,
freq=1./self.period_list)
#make sure that the station data starts out with false to fill
#the data later
tf_dict[station] = False
#fill in the data for each station
for dd in data_list:
#get the period index from the data line
p_index = period_dict[dd[0]]
#get the component index from the data line
ii, jj = index_dict[dd[7].lower()]
#if the station data has not been filled yet, fill it
if tf_dict[dd[1]] == False:
data_dict[dd[1]].lat = dd[2]
data_dict[dd[1]].lon = dd[3]
data_dict[dd[1]].grid_north = dd[4]
data_dict[dd[1]].grid_east = dd[5]
data_dict[dd[1]].grid_elev = dd[6]
data_dict[dd[1]].station = dd[1]
tf_dict[dd[1]] = True
#fill in the impedance tensor with appropriate values
if dd[7].find('Z') == 0:
z_err = dd[10]
if self.wave_sign_impedance == '+':
z_value = dd[8]+1j*dd[9]
elif self.wave_sign_impedance == '-':
z_value = dd[8]-1j*dd[9]
if self.units == 'ohm':
z_value *= 796.
z_err *= 796.
data_dict[dd[1]].Z.z[p_index, ii, jj] = z_value
data_dict[dd[1]].Z.z_err[p_index, ii, jj] = z_err
#fill in tipper with appropriate values
elif dd[7].find('T') == 0:
if self.wave_sign_tipper == '+':
data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8]+1j*dd[9]
elif self.wave_sign_tipper == '-':
data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8]-1j*dd[9]
data_dict[dd[1]].Tipper.tipper_err[p_index, ii, jj] = dd[10]
#make mt_dict an attribute for easier manipulation later
self.mt_dict = data_dict
ns = len(self.mt_dict.keys())
nf = len(self.period_list)
self._set_dtype((nf, 2, 2), (nf, 1, 2))
self.data_array = np.zeros(ns, dtype=self._dtype)
#Be sure to caclulate invariants and phase tensor for each station
for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
mt_obj = self.mt_dict[s_key]
self.mt_dict[s_key].zinv.compute_invariants()
self.mt_dict[s_key].pt.set_z_object(mt_obj.Z)
self.mt_dict[s_key].Tipper.compute_amp_phase()
self.mt_dict[s_key].Tipper.compute_mag_direction()
self.data_array[ii]['station'] = mt_obj.station
self.data_array[ii]['lat'] = mt_obj.lat
self.data_array[ii]['lon'] = mt_obj.lon
self.data_array[ii]['east'] = mt_obj.east
self.data_array[ii]['north'] = mt_obj.north
self.data_array[ii]['elev'] = mt_obj.grid_elev
self.data_array[ii]['rel_east'] = mt_obj.grid_east
self.data_array[ii]['rel_north'] = mt_obj.grid_north
self.data_array[ii]['z'][:] = mt_obj.Z.z
self.data_array[ii]['z_err'][:] = mt_obj.Z.z_err
self.data_array[ii]['tip'][:] = mt_obj.Tipper.tipper
self.data_array[ii]['tip_err'][:] = mt_obj.Tipper.tipper_err
# option to provide real world coordinates in eastings/northings
# (ModEM data file contains real world center in lat/lon but projection
# is not provided so utm is assumed, causing errors when points cross
# utm zones. And lat/lon cut off to 3 d.p. causing errors in smaller areas)
if center_utm is not None:
self.data_array['east'] = self.data_array['rel_east'] + center_utm[0]
self.data_array['north'] = self.data_array['rel_north'] + center_utm[1]
def write_vtk_station_file(self, vtk_save_path=None,
vtk_fn_basename='ModEM_stations'):
"""
write a vtk file for station locations. For now this in relative
coordinates.
Arguments:
-------------
**vtk_save_path** : string
directory to save vtk file to.
*default* is Model.save_path
**vtk_fn_basename** : string
filename basename of vtk file
*default* is ModEM_stations, evtk will add
on the extension .vtu
"""
if vtk_save_path is not None:
vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
else:
vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)
pointsToVTK(vtk_fn,
self.station_locations['rel_north']/1000,
self.station_locations['rel_east']/1000,
-self.station_locations['elev']/1000,
data={'elevation':self.station_locations['elev']})
print '--> Wrote station file to {0}'.format(vtk_fn)
print '-'*50
#==============================================================================
# mesh class
#==============================================================================
class Model(object):
"""
make and read a FE mesh grid
The mesh assumes the coordinate system where:
x == North
y == East
z == + down
All dimensions are in meters.
:Example 1 --> create mesh first then data file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
import mtpy.modeling.ModEM >>> ... if edi.find('.edi') > 0]
>>> #2) make a grid from the stations themselves with 200m cell spacing
import mtpy.modeling.ModEM
>>> mmesh = mtpy.modeling.ModEM.Model(edi_list=edi_list, cell_size_east=200,
>>> ... cell_size_north=200)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> msmesh.plot_mesh()
>>> # all is good write the mesh file
>>> msmesh.write_model_file(save_path=r"/home/modem/Inv1")
>>> # create data file
>>> md = mtpy.modeling.ModEM.Data(edi_list, station_locations=mmesh.station_locations)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
:Example 2 --> create data file first then model file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
>>> ... if edi.find('.edi') > 0]
>>> #2) create data file
>>> md = modem.Data(edi_list)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
>>> #3) make a grid from the stations themselves with 200m cell spacing
>>> mmesh = modem.Model(edi_list=edi_list, cell_size_east=200,
cell_size_north=200,
station_locations=md.station_locations)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> msmesh.plot_mesh()
>>> # all is good write the mesh file
>>> msmesh.write_model_file(save_path=r"/home/modem/Inv1")
:Example 3 --> Rotate Mesh: ::
>>> mmesh.mesh_rotation_angle = 60
>>> mmesh.make_mesh()
..note:: ModEM assumes all coordinates are relative to North and East, and
does not accommodate mesh rotations, therefore, here the rotation
is of the stations, which essentially does the same thing. You
will need to rotate you data to align with the 'new' coordinate
system.
==================== ======================================================
Attributes Description
==================== ======================================================
cell_size_east mesh block width in east direction
*default* is 500
cell_size_north mesh block width in north direction
*default* is 500
edi_list list of .edi files to invert for
grid_east overall distance of grid nodes in east direction
grid_north overall distance of grid nodes in north direction
grid_z overall distance of grid nodes in z direction
model_fn full path to initial file name
n_layers total number of vertical layers in model
nodes_east relative distance between nodes in east direction
nodes_north relative distance between nodes in north direction
nodes_z relative distance between nodes in east direction
pad_east number of cells for padding on E and W sides
*default* is 7
pad_north number of cells for padding on S and N sides
*default* is 7
pad_root_east padding cells E & W will be pad_root_east**(x)
pad_root_north padding cells N & S will be pad_root_north**(x)
pad_z number of cells for padding at bottom
*default* is 4
res_list list of resistivity values for starting model
res_model starting resistivity model
mesh_rotation_angle Angle to rotate the grid to. Angle is measured
positve clockwise assuming North is 0 and east is 90.
*default* is None
save_path path to save file to
station_fn full path to station file
station_locations location of stations
title title in initial file
z1_layer first layer thickness
z_bottom absolute bottom of the model *default* is 300,000
z_target_depth Depth of deepest target, *default* is 50,000
_utm_grid_size_east size of a UTM grid in east direction.
*default* is 640000 meters
_utm_grid_size_north size of a UTM grid in north direction.
*default* is 888960 meters
==================== ======================================================
..note:: If the survey steps across multiple UTM zones, then a
distance will be added to the stations to place them in
the correct location. This distance is
_utm_grid_size_north and _utm_grid_size_east. You should
these parameters to place the locations in the proper spot
as grid distances and overlaps change over the globe.
==================== ======================================================
Methods Description
==================== ======================================================
make_mesh makes a mesh from the given specifications
plot_mesh plots mesh to make sure everything is good
write_initial_file writes an initial model file that includes the mesh
==================== ======================================================
"""
def __init__(self, edi_list=None, **kwargs):
self.edi_list = edi_list
# size of cells within station area in meters
self.cell_size_east = kwargs.pop('cell_size_east', 500)
self.cell_size_north = kwargs.pop('cell_size_north', 500)
#padding cells on either side
self.pad_east = kwargs.pop('pad_east', 7)
self.pad_north = kwargs.pop('pad_north', 7)
self.pad_z = kwargs.pop('pad_z', 4)
#root of padding cells
self.pad_stretch_h= kwargs.pop('pad_stretch_h', 1.2)
self.pad_stretch_v= kwargs.pop('pad_stretch_v', 1.2)
self.z1_layer = kwargs.pop('z1_layer', 10)
self.z_target_depth = kwargs.pop('z_target_depth', 50000)
self.z_bottom = kwargs.pop('z_bottom', 300000)
#number of vertical layers
self.n_layers = kwargs.pop('n_layers', 30)
#strike angle to rotate grid to
self.mesh_rotation_angle = kwargs.pop('mesh_rotation_angle', 0)
#--> attributes to be calculated
#station information
self.station_locations = kwargs.pop('station_locations', None)
#grid nodes
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
#grid locations
self.grid_east = None
self.grid_north = None
self.grid_z = None
#size of a utm grid
self._utm_grid_size_north = 888960.0
self._utm_grid_size_east = 640000.0
self._utm_cross = False
self._utm_ellipsoid = 23
#resistivity model
self.res_model = None
self.grid_center = None
#inital file stuff
self.model_fn = kwargs.pop('model_fn', None)
self.save_path = kwargs.pop('save_path', None)
self.model_fn_basename = kwargs.pop('model_fn_basename',
'ModEM_Model.ws')
if self.model_fn is not None:
self.save_path = os.path.dirname(self.model_fn)
self.model_fn_basename = os.path.basename(self.model_fn)
self.title = 'Model File written by MTpy.modeling.modem'
self.res_scale = kwargs.pop('res_scale', 'loge')
def get_station_locations(self):
"""
get the station locations from lats and lons
"""
utm_zones_dict = {'M':9, 'L':8, 'K':7, 'J':6, 'H':5, 'G':4, 'F':3,
'E':2, 'D':1, 'C':0, 'N':10, 'P':11, 'Q':12, 'R':13,
'S':14, 'T':15, 'U':16, 'V':17, 'W':18, 'X':19}
#if station locations are not input read from the edi files
if self.station_locations is None:
if self.edi_list is None:
raise AttributeError('edi_list is None, need to input a list of '
'edi files to read in.')
n_stations = len(self.edi_list)
if n_stations == 0:
raise ModEMError('No .edi files in edi_list, please check '
'file locations.')
#make a structured array to put station location information into
self.station_locations = np.zeros(n_stations,
dtype=[('station','|S10'),
('lat', np.float),
('lon', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('rel_east', np.float),
('rel_north', np.float),
('elev', np.float)])
#get station locations in meters
for ii, edi in enumerate(self.edi_list):
mt_obj = mt.MT(edi)
self.station_locations[ii]['lat'] = mt_obj.lat
self.station_locations[ii]['lon'] = mt_obj.lon
self.station_locations[ii]['station'] = mt_obj.station
self.station_locations[ii]['east'] = mt_obj.east
self.station_locations[ii]['north'] = mt_obj.north
self.station_locations[ii]['elev'] = mt_obj.elev
self.station_locations[ii]['zone'] = mt_obj.utm_zone
#--> need to convert lat and lon to east and north
for c_arr in self.station_locations:
if c_arr['lat'] != 0.0 and c_arr['lon'] != 0.0:
c_arr['zone'], c_arr['east'], c_arr['north'] = \
mtpy.utils.gis_tools.ll_to_utm(self._utm_ellipsoid,
c_arr['lat'],
c_arr['lon'])
#--> need to check to see if all stations are in the same zone
utm_zone_list = list(set(self.station_locations['zone']))
#if there are more than one zone, figure out which zone is the odd ball
utm_zone_dict = dict([(utmzone, 0) for utmzone in utm_zone_list])
if len(utm_zone_list) != 1:
self._utm_cross = True
for c_arr in self.station_locations:
utm_zone_dict[c_arr['zone']] += 1
#flip keys and values so the key is the number of zones and
# the value is the utm zone
utm_zone_dict = dict([(utm_zone_dict[key], key)
for key in utm_zone_dict.keys()])
#get the main utm zone as the one with the most stations in it
main_utm_zone = utm_zone_dict[max(utm_zone_dict.keys())]
#Get a list of index values where utm zones are not the
#same as the main zone
diff_zones = np.where(self.station_locations['zone'] != main_utm_zone)[0]
for c_index in diff_zones:
c_arr = self.station_locations[c_index]
c_utm_zone = c_arr['zone']
print '{0} utm_zone is {1} and does not match {2}'.format(
c_arr['station'], c_arr['zone'], main_utm_zone)
zone_shift = 1-abs(utm_zones_dict[c_utm_zone[-1]]-\
utm_zones_dict[main_utm_zone[-1]])
#--> check to see if the zone is in the same latitude
#if odd ball zone is north of main zone, add 888960 m
if zone_shift > 1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> adding {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
'staions.')
c_arr['north'] += north_shift
#if odd ball zone is south of main zone, subtract 88960 m
elif zone_shift < -1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> subtracting {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
'staions.')
c_arr['north'] -= north_shift
#--> if zone is shifted east or west
if int(c_utm_zone[0:-1]) > int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> adding {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
'staions.')
c_arr['east'] += east_shift
elif int(c_utm_zone[0:-1]) < int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> subtracting {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
'staions.')
c_arr['east'] -= east_shift
#remove the average distance to get coordinates in a relative space
self.station_locations['rel_east'] = self.station_locations['east']-\
self.station_locations['east'].mean()
self.station_locations['rel_north'] = self.station_locations['north']-\
self.station_locations['north'].mean()
#--> rotate grid if necessary
#to do this rotate the station locations because ModEM assumes the
#input mesh is a lateral grid.
#needs to be 90 - because North is assumed to be 0 but the rotation
#matrix assumes that E is 0.
if self.mesh_rotation_angle != 0:
cos_ang = np.cos(np.deg2rad(self.mesh_rotation_angle))
sin_ang = np.sin(np.deg2rad(self.mesh_rotation_angle))
rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
[-sin_ang, cos_ang]]))
coords = np.array([self.station_locations['rel_east'],
self.station_locations['rel_north']])
#rotate the relative station locations
new_coords = np.array(np.dot(rot_matrix, coords))
self.station_locations['rel_east'][:] = new_coords[0, :]
self.station_locations['rel_north'][:] = new_coords[1, :]
print 'Rotated stations by {0:.1f} deg clockwise from N'.format(
self.mesh_rotation_angle)
#translate the stations so they are relative to 0,0
east_center = (self.station_locations['rel_east'].max()-
np.abs(self.station_locations['rel_east'].min()))/2
north_center = (self.station_locations['rel_north'].max()-
np.abs(self.station_locations['rel_north'].min()))/2
#remove the average distance to get coordinates in a relative space
self.station_locations['rel_east'] -= east_center
self.station_locations['rel_north'] -= north_center
def make_mesh(self, update_data_center=False):
"""
create finite element mesh according to parameters set.
The mesh is built by first finding the center of the station area.
Then cells are added in the north and east direction with width
cell_size_east and cell_size_north to the extremeties of the station
area. Padding cells are then added to extend the model to reduce
edge effects. The number of cells are pad_east and pad_north and the
increase in size is by pad_root_east and pad_root_north. The station
locations are then computed as the center of the nearest cell as
required by the code.
The vertical cells are built to increase in size exponentially with
depth. The first cell depth is first_layer_thickness and should be
about 1/10th the shortest skin depth. The layers then increase
on a log scale to z_target_depth. Then the model is
padded with pad_z number of cells to extend the depth of the model.
padding = np.round(cell_size_east*pad_root_east**np.arange(start=.5,
stop=3, step=3./pad_east))+west
..note:: If the survey steps across multiple UTM zones, then a
distance will be added to the stations to place them in
the correct location. This distance is
_utm_grid_size_north and _utm_grid_size_east. You should
these parameters to place the locations in the proper spot
as grid distances and overlaps change over the globe.
"""
self.get_station_locations()
#find the edges of the grid
west = self.station_locations['rel_east'].min()-(1.5*self.cell_size_east)
east = self.station_locations['rel_east'].max()+(1.5*self.cell_size_east)
south = self.station_locations['rel_north'].min()-(1.5*self.cell_size_north)
north = self.station_locations['rel_north'].max()+(1.5*self.cell_size_north)
west = np.round(west, -2)
east= np.round(east, -2)
south= np.round(south, -2)
north = np.round(north, -2)
#-------make a grid around the stations from the parameters above------
#--> make grid in east-west direction
#cells within station area
east_gridr = np.arange(start=west, stop=east+self.cell_size_east,
step=self.cell_size_east)
#padding cells in the east-west direction
for ii in range(1, self.pad_east+1):
east_0 = float(east_gridr[-1])
west_0 = float(east_gridr[0])
add_size = np.round(self.cell_size_east*self.pad_stretch_h*ii, -2)
pad_w = west_0-add_size
pad_e = east_0+add_size
east_gridr = np.insert(east_gridr, 0, pad_w)
east_gridr = np.append(east_gridr, pad_e)
#--> need to make sure none of the stations lie on the nodes
for s_east in sorted(self.station_locations['rel_east']):
try:
node_index = np.where(abs(s_east-east_gridr) <
.02*self.cell_size_east)[0][0]
if s_east-east_gridr[node_index] > 0:
east_gridr[node_index] -= .02*self.cell_size_east
elif s_east-east_gridr[node_index] < 0:
east_gridr[node_index] += .02*self.cell_size_east
except IndexError:
continue
#--> make grid in north-south direction
#N-S cells with in station area
north_gridr = np.arange(start=south, stop=north+self.cell_size_north,
step=self.cell_size_north)
#padding cells in the east-west direction
for ii in range(1, self.pad_north+1):
south_0 = float(north_gridr[0])
north_0 = float(north_gridr[-1])
add_size = np.round(self.cell_size_north*self.pad_stretch_h*ii, -2)
pad_s = south_0-add_size
pad_n = north_0+add_size
north_gridr = np.insert(north_gridr, 0, pad_s)
north_gridr = np.append(north_gridr, pad_n)
#--> need to make sure none of the stations lie on the nodes
for s_north in sorted(self.station_locations['rel_north']):
try:
node_index = np.where(abs(s_north-north_gridr) <
.02*self.cell_size_north)[0][0]
if s_north-north_gridr[node_index] > 0:
north_gridr[node_index] -= .02*self.cell_size_north
elif s_north-north_gridr[node_index] < 0:
north_gridr[node_index] += .02*self.cell_size_north
except IndexError:
continue
#--> make depth grid
log_z = np.logspace(np.log10(self.z1_layer),
np.log10(self.z_target_depth-np.logspace(np.log10(self.z1_layer),
np.log10(self.z_target_depth),
num=self.n_layers)[-2]),
num=self.n_layers-self.pad_z)
z_nodes = np.array([zz-zz%10**np.floor(np.log10(zz)) for zz in
log_z])
#padding cells in the east-west direction
for ii in range(1, self.pad_z+1):
z_0 = np.float(z_nodes[-2])
pad_d = np.round(z_0*self.pad_stretch_v*ii, -2)
z_nodes = np.append(z_nodes, pad_d)
#make an array of absolute values
z_grid = np.array([z_nodes[:ii+1].sum() for ii in range(z_nodes.shape[0])])
#---Need to make an array of the individual cell dimensions for
# modem
east_nodes = east_gridr.copy()
nx = east_gridr.shape[0]
east_nodes[:nx/2] = np.array([abs(east_gridr[ii]-east_gridr[ii+1])
for ii in range(int(nx/2))])
east_nodes[nx/2:] = np.array([abs(east_gridr[ii]-east_gridr[ii+1])
for ii in range(int(nx/2)-1, nx-1)])
north_nodes = north_gridr.copy()
ny = north_gridr.shape[0]
north_nodes[:ny/2] = np.array([abs(north_gridr[ii]-north_gridr[ii+1])
for ii in range(int(ny/2))])
north_nodes[ny/2:] = np.array([abs(north_gridr[ii]-north_gridr[ii+1])
for ii in range(int(ny/2)-1, ny-1)])
#--put the grids into coordinates relative to the center of the grid
east_grid = east_nodes.copy()
east_grid[:int(nx/2)] = -np.array([east_nodes[ii:int(nx/2)].sum()
for ii in range(int(nx/2))])
east_grid[int(nx/2):] = np.array([east_nodes[int(nx/2):ii+1].sum()
for ii in range(int(nx/2), nx)])-\
east_nodes[int(nx/2)]
north_grid = north_nodes.copy()
north_grid[:int(ny/2)] = -np.array([north_nodes[ii:int(ny/2)].sum()
for ii in range(int(ny/2))])
north_grid[int(ny/2):] = np.array([north_nodes[int(ny/2):ii+1].sum()
for ii in range(int(ny/2),ny)])-\
north_nodes[int(ny/2)]
#compute grid center
center_east = -east_nodes.__abs__().sum()/2
center_north = -north_nodes.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
#make nodes attributes
self.nodes_east = east_nodes
self.nodes_north = north_nodes
self.nodes_z = z_nodes
self.grid_east = east_grid
self.grid_north = north_grid
self.grid_z = z_grid
#--> print out useful information
print '-'*15
print ' Number of stations = {0}'.format(len(self.station_locations))
print ' Dimensions: '
print ' e-w = {0}'.format(east_grid.shape[0])
print ' n-s = {0}'.format(north_grid.shape[0])
print ' z = {0} (without 7 air layers)'.format(z_grid.shape[0])
print ' Extensions: '
print ' e-w = {0:.1f} (m)'.format(east_nodes.__abs__().sum())
print ' n-s = {0:.1f} (m)'.format(north_nodes.__abs__().sum())
print ' 0-z = {0:.1f} (m)'.format(self.nodes_z.__abs__().sum())
print ' Stations rotated by: {0:.1f} deg clockwise positive from N'.format(self.mesh_rotation_angle)
print ''
print ' ** Note ModEM does not accommodate mesh rotations, it assumes'
print ' all coordinates are aligned to geographic N, E'
print ' therefore rotating the stations will have a similar effect'
print ' as rotating the mesh.'
print '-'*15
if self._utm_cross is True:
print '{0} {1} {2}'.format('-'*25, 'NOTE', '-'*25)
print ' Survey crosses UTM zones, be sure that stations'
print ' are properly located, if they are not, adjust parameters'
print ' _utm_grid_size_east and _utm_grid_size_north.'
print ' these are in meters and represent the utm grid size'
print ' Example: '
print ' >>> modem_model._utm_grid_size_east = 644000'
print ' >>> modem_model.make_mesh()'
print ''
print '-'*56
def plot_mesh(self, east_limits=None, north_limits=None, z_limits=None,
**kwargs):
"""
Arguments:
----------
**east_limits** : tuple (xmin,xmax)
plot min and max distances in meters for the
E-W direction. If None, the east_limits
will be set to furthest stations east and west.
*default* is None
**north_limits** : tuple (ymin,ymax)
plot min and max distances in meters for the
N-S direction. If None, the north_limits
will be set to furthest stations north and south.
*default* is None
**z_limits** : tuple (zmin,zmax)
plot min and max distances in meters for the
vertical direction. If None, the z_limits is
set to the number of layers. Z is positive down
*default* is None
"""
fig_size = kwargs.pop('fig_size', [6, 6])
fig_dpi = kwargs.pop('fig_dpi', 300)
fig_num = kwargs.pop('fig_num', 1)
station_marker = kwargs.pop('station_marker', 'v')
marker_color = kwargs.pop('station_color', 'b')
marker_size = kwargs.pop('marker_size', 2)
line_color = kwargs.pop('line_color', 'k')
line_width = kwargs.pop('line_width', .5)
plt.rcParams['figure.subplot.hspace'] = .3
plt.rcParams['figure.subplot.wspace'] = .3
plt.rcParams['figure.subplot.left'] = .12
plt.rcParams['font.size'] = 7
fig = plt.figure(fig_num, figsize=fig_size, dpi=fig_dpi)
plt.clf()
#make a rotation matrix to rotate data
#cos_ang = np.cos(np.deg2rad(self.mesh_rotation_angle))
#sin_ang = np.sin(np.deg2rad(self.mesh_rotation_angle))
#turns out ModEM has not accomodated rotation of the grid, so for
#now we will not rotate anything.
cos_ang = 1
sin_ang = 0
#--->plot map view
ax1 = fig.add_subplot(1, 2, 1, aspect='equal')
#plot station locations
plot_east = self.station_locations['rel_east']
plot_north = self.station_locations['rel_north']
ax1.scatter(plot_east,
plot_north,
marker=station_marker,
c=marker_color,
s=marker_size)
east_line_xlist = []
east_line_ylist = []
north_min = self.grid_north.min()
north_max = self.grid_north.max()
for xx in self.grid_east:
east_line_xlist.extend([xx*cos_ang+north_min*sin_ang,
xx*cos_ang+north_max*sin_ang])
east_line_xlist.append(None)
east_line_ylist.extend([-xx*sin_ang+north_min*cos_ang,
-xx*sin_ang+north_max*cos_ang])
east_line_ylist.append(None)
ax1.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
north_line_xlist = []
north_line_ylist = []
east_max = self.grid_east.max()
east_min = self.grid_east.min()
for yy in self.grid_north:
north_line_xlist.extend([east_min*cos_ang+yy*sin_ang,
east_max*cos_ang+yy*sin_ang])
north_line_xlist.append(None)
north_line_ylist.extend([-east_min*sin_ang+yy*cos_ang,
-east_max*sin_ang+yy*cos_ang])
north_line_ylist.append(None)
ax1.plot(north_line_xlist,
north_line_ylist,
lw=line_width,
color=line_color)
if east_limits == None:
ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
plot_east.max()+10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
if north_limits == None:
ax1.set_ylim(plot_north.min()-10*self.cell_size_north,
plot_north.max()+ 10*self.cell_size_east)
else:
ax1.set_ylim(north_limits)
ax1.set_ylabel('Northing (m)', fontdict={'size':9,'weight':'bold'})
ax1.set_xlabel('Easting (m)', fontdict={'size':9,'weight':'bold'})
##----plot depth view
ax2 = fig.add_subplot(1, 2, 2, aspect='auto', sharex=ax1)
#plot the grid
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([0,
self.grid_z.max()])
east_line_ylist.append(None)
ax2.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
z_line_xlist = []
z_line_ylist = []
for zz in self.grid_z:
z_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
z_line_xlist.append(None)
z_line_ylist.extend([zz, zz])
z_line_ylist.append(None)
ax2.plot(z_line_xlist,
z_line_ylist,
lw=line_width,
color=line_color)
#--> plot stations
ax2.scatter(plot_east,
[0]*self.station_locations.shape[0],
marker=station_marker,
c=marker_color,
s=marker_size)
if z_limits == None:
ax2.set_ylim(self.z_target_depth, -200)
else:
ax2.set_ylim(z_limits)
if east_limits == None:
ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
plot_east.max()+10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
ax2.set_ylabel('Depth (m)', fontdict={'size':9, 'weight':'bold'})
ax2.set_xlabel('Easting (m)', fontdict={'size':9, 'weight':'bold'})
plt.show()
def write_model_file(self, **kwargs):
"""
will write an initial file for ModEM.
Note that x is assumed to be S --> N, y is assumed to be W --> E and
z is positive downwards. This means that index [0, 0, 0] is the
southwest corner of the first layer. Therefore if you build a model
by hand the layer block will look as it should in map view.
Also, the xgrid, ygrid and zgrid are assumed to be the relative
distance between neighboring nodes. This is needed because wsinv3d
builds the model from the bottom SW corner assuming the cell width
from the init file.
Key Word Arguments:
----------------------
**nodes_north** : np.array(nx)
block dimensions (m) in the N-S direction.
**Note** that the code reads the grid assuming that
index=0 is the southern most point.
**nodes_east** : np.array(ny)
block dimensions (m) in the E-W direction.
**Note** that the code reads in the grid assuming that
index=0 is the western most point.
**nodes_z** : np.array(nz)
block dimensions (m) in the vertical direction.
This is positive downwards.
**save_path** : string
Path to where the initial file will be saved
to savepath/model_fn_basename
**model_fn_basename** : string
basename to save file to
*default* is ModEM_Model.ws
file is saved at savepath/model_fn_basename
**title** : string
Title that goes into the first line
*default* is Model File written by MTpy.modeling.modem
**res_model** : np.array((nx,ny,nz))
Prior resistivity model.
.. note:: again that the modeling code
assumes that the first row it reads in is the southern
most row and the first column it reads in is the
western most column. Similarly, the first plane it
reads in is the Earth's surface.
**res_scale** : [ 'loge' | 'log' | 'log10' | 'linear' ]
scale of resistivity. In the ModEM code it
converts everything to Loge,
*default* is 'loge'
"""
keys = ['nodes_east', 'nodes_north', 'nodes_z', 'title',
'res_model', 'save_path', 'model_fn', 'model_fn_basename']
for key in keys:
try:
setattr(self, key, kwargs[key])
except KeyError:
if self.__dict__[key] is None:
pass
if self.save_path is not None:
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
if self.model_fn is None:
if self.save_path is None:
self.save_path = os.getcwd()
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
elif os.path.isdir(self.save_path) == True:
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
else:
self.save_path = os.path.dirname(self.save_path)
self.model_fn= self.save_path
if self.res_model is None or type(self.res_model) is float or\
type(self.res_model) is int:
res_model = np.zeros((self.nodes_north.shape[0],
self.nodes_east.shape[0],
self.nodes_z.shape[0]))
if self.res_model is None:
res_model[:, :, :] = 100.0
self.res_model = res_model
else:
res_model[:, :, :] = self.res_model
self.res_model = res_model
#--> write file
ifid = file(self.model_fn, 'w')
ifid.write('# {0}\n'.format(self.title.upper()))
ifid.write('{0:>5}{1:>5}{2:>5}{3:>5} {4}\n'.format(self.nodes_north.shape[0],
self.nodes_east.shape[0],
self.nodes_z.shape[0],
0,
self.res_scale.upper()))
#write S --> N node block
for ii, nnode in enumerate(self.nodes_north):
ifid.write('{0:>12.3f}'.format(abs(nnode)))
ifid.write('\n')
#write W --> E node block
for jj, enode in enumerate(self.nodes_east):
ifid.write('{0:>12.3f}'.format(abs(enode)))
ifid.write('\n')
#write top --> bottom node block
for kk, zz in enumerate(self.nodes_z):
ifid.write('{0:>12.3f}'.format(abs(zz)))
ifid.write('\n')
#write the resistivity in log e format
if self.res_scale.lower() == 'loge':
write_res_model = np.log(self.res_model[::-1, :, :])
elif self.res_scale.lower() == 'log' or \
self.res_scale.lower() == 'log10':
write_res_model = np.log10(self.res_model[::-1, :, :])
elif self.res_scale.lower() == 'linear':
write_res_model = self.res_model[::-1, :, :]
#write out the layers from resmodel
for zz in range(self.nodes_z.shape[0]):
ifid.write('\n')
for ee in range(self.nodes_east.shape[0]):
for nn in range(self.nodes_north.shape[0]):
ifid.write('{0:>13.5E}'.format(write_res_model[nn, ee, zz]))
ifid.write('\n')
if self.grid_center is None:
#compute grid center
center_east = -self.nodes_east.__abs__().sum()/2
center_north = -self.nodes_north.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
ifid.write('\n{0:>16.3f}{1:>16.3f}{2:>16.3f}\n'.format(self.grid_center[0],
self.grid_center[1], self.grid_center[2]))
if self.mesh_rotation_angle is None:
ifid.write('{0:>9.3f}\n'.format(0))
else:
ifid.write('{0:>9.3f}\n'.format(self.mesh_rotation_angle))
ifid.close()
print 'Wrote file to: {0}'.format(self.model_fn)
def read_model_file(self, model_fn=None):
"""
read an initial file and return the pertinent information including
grid positions in coordinates relative to the center point (0,0) and
starting model.
Note that the way the model file is output, it seems is that the
blocks are setup as
ModEM: WS:
---------- -----
0-----> N_north 0-------->N_east
| |
| |
V V
N_east N_north
Arguments:
----------
**model_fn** : full path to initializing file.
Outputs:
--------
**nodes_north** : np.array(nx)
array of nodes in S --> N direction
**nodes_east** : np.array(ny)
array of nodes in the W --> E direction
**nodes_z** : np.array(nz)
array of nodes in vertical direction positive downwards
**res_model** : dictionary
dictionary of the starting model with keys as layers
**res_list** : list
list of resistivity values in the model
**title** : string
title string
"""
if model_fn is not None:
self.model_fn = model_fn
if self.model_fn is None:
raise ModEMError('model_fn is None, input a model file name')
if os.path.isfile(self.model_fn) is None:
raise ModEMError('Cannot find {0}, check path'.format(self.model_fn))
self.save_path = os.path.dirname(self.model_fn)
ifid = file(self.model_fn, 'r')
ilines = ifid.readlines()
ifid.close()
self.title = ilines[0].strip()
#get size of dimensions, remembering that x is N-S, y is E-W, z is + down
nsize = ilines[1].strip().split()
n_north = int(nsize[0])
n_east = int(nsize[1])
n_z = int(nsize[2])
log_yn = nsize[4]
#get nodes
self.nodes_north = np.array([np.float(nn)
for nn in ilines[2].strip().split()])
self.nodes_east = np.array([np.float(nn)
for nn in ilines[3].strip().split()])
self.nodes_z = np.array([np.float(nn)
for nn in ilines[4].strip().split()])
self.res_model = np.zeros((n_north, n_east, n_z))
#get model
count_z = 0
line_index= 6
count_e = 0
while count_z < n_z:
iline = ilines[line_index].strip().split()
#blank lines spit the depth blocks, use those as a marker to
#set the layer number and start a new block
if len(iline) == 0:
count_z += 1
count_e = 0
line_index += 1
#each line in the block is a line of N-->S values for an east value
else:
north_line = np.array([float(nres) for nres in
ilines[line_index].strip().split()])
# Need to be sure that the resistivity array matches
# with the grids, such that the first index is the
# furthest south
self.res_model[:, count_e, count_z] = north_line[::-1]
count_e += 1
line_index += 1
#--> get grid center and rotation angle
if len(ilines) > line_index:
for iline in ilines[line_index:]:
ilist = iline.strip().split()
#grid center
if len(ilist) == 3:
self.grid_center = np.array(ilist, dtype=np.float)
#rotation angle
elif len(ilist) == 1:
self.rotation_angle = np.float(ilist[0])
else:
pass
#--> make sure the resistivity units are in linear Ohm-m
if log_yn.lower() == 'loge':
self.res_model = np.e**self.res_model
elif log_yn.lower() == 'log' or log_yn.lower() == 'log10':
self.res_model = 10**self.res_model
#put the grids into coordinates relative to the center of the grid
self.grid_north = np.array([self.nodes_north[0:ii].sum()
for ii in range(n_north + 1)])
self.grid_east = np.array([self.nodes_east[0:ii].sum()
for ii in range(n_east + 1)])
self.grid_z = np.array([self.nodes_z[:ii+1].sum()
for ii in range(n_z + 1)])
# center the grids
if self.grid_center is not None:
self.grid_north += self.grid_center[0]
self.grid_east += self.grid_center[1]
self.grid_z += self.grid_center[2]
self.cell_size_east = stats.mode(self.nodes_east)[0][0]
self.cell_size_north = stats.mode(self.nodes_north)[0][0]
self.pad_east = np.where(self.nodes_east[0:int(self.nodes_east.size/2)]
!= self.cell_size_east)[0][-1]
self.north_pad = np.where(self.nodes_north[0:int(self.nodes_north.size/2)]
!= self.cell_size_north)[0][-1]
def read_ws_model_file(self, ws_model_fn):
"""
reads in a WS3INV3D model file
"""
ws_model_obj = ws.WSModel(ws_model_fn)
ws_model_obj.read_model_file()
#set similar attributes
for ws_key in ws_model_obj.__dict__.keys():
for md_key in self.__dict__.keys():
if ws_key == md_key:
setattr(self, ws_key, ws_model_obj.__dict__[ws_key])
#compute grid center
center_east = -self.nodes_east.__abs__().sum()/2
center_north = -self.nodes_norths.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
def write_vtk_file(self, vtk_save_path=None,
vtk_fn_basename='ModEM_model_res'):
"""
write a vtk file to view in Paraview or other
Arguments:
-------------
**vtk_save_path** : string
directory to save vtk file to.
*default* is Model.save_path
**vtk_fn_basename** : string
filename basename of vtk file
*default* is ModEM_model_res, evtk will add
on the extension .vtr
"""
if vtk_save_path is not None:
vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
else:
vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)
gridToVTK(vtk_fn,
self.grid_north/1000.,
self.grid_east/1000.,
self.grid_z/1000.,
pointData={'resistivity':self.res_model})
print '-'*50
print '--> Wrote model file to {0}\n'.format(vtk_fn)
print '='*26
print ' model dimensions = {0}'.format(self.res_model.shape)
print ' * north {0}'.format(self.grid_north.shape[0])
print ' * east {0}'.format(self.grid_east.shape[0])
print ' * depth {0}'.format(self.grid_z.shape[0])
print '='*26
#==============================================================================
# Control File for inversion
#==============================================================================
class Control_Inv(object):
"""
read and write control file for how the inversion starts and how it is run
"""
def __init__(self, **kwargs):
self.output_fn = kwargs.pop('output_fn', 'MODULAR_NLCG')
self.lambda_initial = kwargs.pop('lambda_initial', 10)
self.lambda_step = kwargs.pop('lambda_step', 10)
self.model_search_step = kwargs.pop('model_search_step', 1)
self.rms_reset_search = kwargs.pop('rms_reset_search', 2.0e-3)
self.rms_target = kwargs.pop('rms_target', 1.05)
self.lambda_exit = kwargs.pop('lambda_exit', 1.0e-4)
self.max_iterations = kwargs.pop('max_iterations', 100)
self.save_path = kwargs.pop('save_path', os.getcwd())
self.fn_basename = kwargs.pop('fn_basename', 'control.inv')
self.control_fn = kwargs.pop('control_fn', os.path.join(self.save_path,
self.fn_basename))
self._control_keys = ['Model and data output file name',
'Initial damping factor lambda',
'To update lambda divide by',
'Initial search step in model units',
'Restart when rms diff is less than',
'Exit search when rms is less than',
'Exit when lambda is less than',
'Maximum number of iterations']
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.output_fn, self.lambda_initial,
self.lambda_step, self.model_search_step,
self.rms_reset_search, self.rms_target,
self.lambda_exit, self.max_iterations])])
self._string_fmt_dict = dict([(key, value)
for key, value in zip(self._control_keys,
['<', '<.1f', '<.1f', '<.1f', '<.1e',
'<.2f', '<.1e', '<.0f'])])
def write_control_file(self, control_fn=None, save_path=None,
fn_basename=None):
"""
write control file
Arguments:
------------
**control_fn** : string
full path to save control file to
*default* is save_path/fn_basename
**save_path** : string
directory path to save control file to
*default* is cwd
**fn_basename** : string
basename of control file
*default* is control.inv
"""
if control_fn is not None:
self.save_path = os.path.dirname(control_fn)
self.fn_basename = os.path.basename(control_fn)
if save_path is not None:
self.save_path = save_path
if fn_basename is not None:
self.fn_basename = fn_basename
self.control_fn = os.path.join(self.save_path, self.fn_basename)
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.output_fn, self.lambda_initial,
self.lambda_step, self.model_search_step,
self.rms_reset_search, self.rms_target,
self.lambda_exit, self.max_iterations])])
clines = []
for key in self._control_keys:
value = self._control_dict[key]
str_fmt = self._string_fmt_dict[key]
clines.append('{0:<35}: {1:{2}}\n'.format(key, value, str_fmt))
cfid = file(self.control_fn, 'w')
cfid.writelines(clines)
cfid.close()
print 'Wrote ModEM control file to {0}'.format(self.control_fn)
def read_control_file(self, control_fn=None):
"""
read in a control file
"""
if control_fn is not None:
self.control_fn = control_fn
if self.control_fn is None:
raise mtex.MTpyError_file_handling('control_fn is None, input '
'control file')
if os.path.isfile(self.control_fn) is False:
raise mtex.MTpyError_file_handling('Could not find {0}'.format(
self.control_fn))
self.save_path = os.path.dirname(self.control_fn)
self.fn_basename = os.path.basename(self.control_fn)
cfid = file(self.control_fn, 'r')
clines = cfid.readlines()
cfid.close()
for cline in clines:
clist = cline.strip().split(':')
if len(clist) == 2:
try:
self._control_dict[clist[0].strip()] = float(clist[1])
except ValueError:
self._control_dict[clist[0].strip()] = clist[1]
#set attributes
attr_list = ['output_fn', 'lambda_initial','lambda_step',
'model_search_step','rms_reset_search','rms_target',
'lambda_exit','max_iterations']
for key, kattr in zip(self._control_keys, attr_list):
setattr(self, kattr, self._control_dict[key])
#==============================================================================
# Control File for inversion
#==============================================================================
class Control_Fwd(object):
"""
read and write control file for the forward solver
This file controls how the forward and adjoint solvers are run
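:Example: (a minimal usage sketch; keyword values shown are illustrative) ::
>>> # write a forward-solver control file with more QMR iterations
>>> c_obj = Control_Fwd(num_qmr_iter=60)
>>> c_obj.write_control_file(save_path=r"/home/ModEM/Inv1")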
"""
def __init__(self, **kwargs):
self.num_qmr_iter = kwargs.pop('num_qmr_iter', 40)
self.max_num_div_calls = kwargs.pop('max_num_div_calls', 20)
self.max_num_div_iters = kwargs.pop('max_num_div_iters', 100)
self.misfit_tol_fwd = kwargs.pop('misfit_tol_fwd', 1.0e-7)
self.misfit_tol_adj = kwargs.pop('misfit_tol_adj', 1.0e-7)
self.misfit_tol_div = kwargs.pop('misfit_tol_div', 1.0e-5)
self.save_path = kwargs.pop('save_path', os.getcwd())
self.fn_basename = kwargs.pop('fn_basename', 'control.fwd')
self.control_fn = kwargs.pop('control_fn', os.path.join(self.save_path,
self.fn_basename))
self._control_keys = ['Number of QMR iters per divergence correction',
'Maximum number of divergence correction calls',
'Maximum number of divergence correction iters',
'Misfit tolerance for EM forward solver',
'Misfit tolerance for EM adjoint solver',
'Misfit tolerance for divergence correction']
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.num_qmr_iter,
self.max_num_div_calls,
self.max_num_div_iters,
self.misfit_tol_fwd,
self.misfit_tol_adj,
self.misfit_tol_div])])
self._string_fmt_dict = dict([(key, value)
for key, value in zip(self._control_keys,
['<.0f', '<.0f', '<.0f', '<.1e', '<.1e',
'<.1e'])])
def write_control_file(self, control_fn=None, save_path=None,
fn_basename=None):
"""
write control file
Arguments:
------------
**control_fn** : string
full path to save control file to
*default* is save_path/fn_basename
**save_path** : string
directory path to save control file to
*default* is cwd
**fn_basename** : string
basename of control file
*default* is control.inv
"""
if control_fn is not None:
self.save_path = os.path.dirname(control_fn)
self.fn_basename = os.path.basename(control_fn)
if save_path is not None:
self.save_path = save_path
if fn_basename is not None:
self.fn_basename = fn_basename
self.control_fn = os.path.join(self.save_path, self.fn_basename)
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.num_qmr_iter,
self.max_num_div_calls,
self.max_num_div_iters,
self.misfit_tol_fwd,
self.misfit_tol_adj,
self.misfit_tol_div])])
clines = []
for key in self._control_keys:
value = self._control_dict[key]
str_fmt = self._string_fmt_dict[key]
clines.append('{0:<47}: {1:{2}}\n'.format(key, value, str_fmt))
cfid = file(self.control_fn, 'w')
cfid.writelines(clines)
cfid.close()
print 'Wrote ModEM control file to {0}'.format(self.control_fn)
def read_control_file(self, control_fn=None):
"""
read in a control file
"""
if control_fn is not None:
self.control_fn = control_fn
if self.control_fn is None:
raise mtex.MTpyError_file_handling('control_fn is None, input '
'control file')
if os.path.isfile(self.control_fn) is False:
raise mtex.MTpyError_file_handling('Could not find {0}'.format(
self.control_fn))
self.save_path = os.path.dirname(self.control_fn)
self.fn_basename = os.path.basename(self.control_fn)
cfid = file(self.control_fn, 'r')
clines = cfid.readlines()
cfid.close()
for cline in clines:
clist = cline.strip().split(':')
if len(clist) == 2:
try:
self._control_dict[clist[0].strip()] = float(clist[1])
except ValueError:
self._control_dict[clist[0].strip()] = clist[1]
#set attributes
attr_list = ['num_qmr_iter','max_num_div_calls', 'max_num_div_iters',
'misfit_tol_fwd', 'misfit_tol_adj', 'misfit_tol_div']
for key, kattr in zip(self._control_keys, attr_list):
setattr(self, kattr, self._control_dict[key])
#==============================================================================
# covariance
#==============================================================================
class Covariance(object):
"""
read and write covariance files
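:Example: (a minimal usage sketch; the model file path is hypothetical) ::
>>> # build a covariance file from an existing model, masking air and sea water
>>> cov = Covariance(smoothing_east=0.4, smoothing_north=0.4, smoothing_z=0.4)
>>> cov.write_covariance_file(model_fn=r"/home/ModEM/Inv1/Test_model.rho")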
"""
def __init__(self, grid_dimensions=None, **kwargs):
self.grid_dimensions = grid_dimensions
self.smoothing_east = kwargs.pop('smoothing_east', 0.3)
self.smoothing_north = kwargs.pop('smoothing_north', 0.3)
self.smoothing_z = kwargs.pop('smoothing_z', 0.3)
self.smoothing_num = kwargs.pop('smoothing_num', 1)
self.exception_list = kwargs.pop('exception_list', [])
self.mask_arr = kwargs.pop('mask_arr', None)
self.save_path = kwargs.pop('save_path', os.getcwd())
self.cov_fn_basename = kwargs.pop('cov_fn_basename', 'covariance.cov')
self.cov_fn = kwargs.pop('cov_fn', None)
self._header_str = '\n'.join(['+{0}+'.format('-'*77),
'| This file defines model covariance for a recursive autoregression scheme. |',
'| The model space may be divided into distinct areas using integer masks. |',
'| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |',
'| air, ocean and the rest of the model is turned off automatically. You can |',
'| also define exceptions to override smoothing between any two model areas. |',
'| To turn off smoothing set it to zero. This header is 16 lines long. |',
'| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |',
'| 2. Smoothing in the X direction (NzEarth real values) |',
'| 3. Smoothing in the Y direction (NzEarth real values) |',
'| 4. Vertical smoothing (1 real value) |',
'| 5. Number of times the smoothing should be applied (1 integer >= 0) |',
'| 6. Number of exceptions (1 integer >= 0) |',
'| 7. Exceptions in the for e.g. 2 3 0. (to turn off smoothing between 3 & 4) |',
'| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|',
'+{0}+'.format('-'*77)])
def write_covariance_file(self, cov_fn=None, save_path=None,
cov_fn_basename=None, model_fn=None,
sea_water=0.3, air=1e12):
"""
write a covariance file
"""
if model_fn is not None:
mod_obj = Model()
mod_obj.read_model_file(model_fn)
print 'Reading {0}'.format(model_fn)
self.grid_dimensions = mod_obj.res_model.shape
self.mask_arr = np.ones_like(mod_obj.res_model)
self.mask_arr[np.where(mod_obj.res_model > air*.9)] = 0
self.mask_arr[np.where((mod_obj.res_model < sea_water*1.1) &
(mod_obj.res_model > sea_water*.9))] = 9
if self.grid_dimensions is None:
raise ModEMError('Grid dimensions are None, input as (Nx, Ny, Nz)')
if cov_fn is not None:
self.cov_fn = cov_fn
else:
if save_path is not None:
self.save_path = save_path
if cov_fn_basename is not None:
self.cov_fn_basename = cov_fn_basename
self.cov_fn = os.path.join(self.save_path, self.cov_fn_basename)
clines = [self._header_str]
clines.append('\n\n')
#--> grid dimensions
clines.append(' {0:<10}{1:<10}{2:<10}\n'.format(self.grid_dimensions[0],
self.grid_dimensions[1],
self.grid_dimensions[2]))
clines.append('\n')
#--> smoothing in north direction
n_smooth_line = ''
for zz in range(self.grid_dimensions[2]):
n_smooth_line += ' {0:<5.1f}'.format(self.smoothing_north)
clines.append(n_smooth_line+'\n')
#--> smoothing in east direction
e_smooth_line = ''
for zz in range(self.grid_dimensions[2]):
e_smooth_line += ' {0:<5.1f}'.format(self.smoothing_east)
clines.append(e_smooth_line+'\n')
#--> smoothing in vertical direction
clines.append(' {0:<5.1f}\n'.format(self.smoothing_z))
clines.append('\n')
#--> number of times to apply smoothing
clines.append(' {0:<2.0f}\n'.format(self.smoothing_num))
clines.append('\n')
#--> exceptions
clines.append(' {0:<.0f}\n'.format(len(self.exception_list)))
for exc in self.exception_list:
clines.append('{0:<5.0f}{1:<5.0f}{2:<5.0f}\n'.format(exc[0],
exc[1],
exc[2]))
clines.append('\n')
clines.append('\n')
#--> mask array
if self.mask_arr is None:
self.mask_arr = np.ones((self.grid_dimensions[0],
self.grid_dimensions[1],
self.grid_dimensions[2]))
for zz in range(self.mask_arr.shape[2]):
clines.append(' {0:<8.0f}{0:<8.0f}\n'.format(zz+1))
for nn in range(self.mask_arr.shape[0]):
cline = ''
for ee in range(self.mask_arr.shape[1]):
cline += '{0:^3.0f}'.format(self.mask_arr[nn, ee, zz])
clines.append(cline+'\n')
cfid = file(self.cov_fn, 'w')
cfid.writelines(clines)
cfid.close()
print 'Wrote covariance file to {0}'.format(self.cov_fn)
#==============================================================================
# Add in elevation to the model
#==============================================================================
#--> read in ascii dem file
def read_dem_ascii(ascii_fn, cell_size=500, model_center=(0, 0), rot_90=0):
"""
read in dem which is ascii format
The ascii format is assumed to be:
ncols 3601
nrows 3601
xllcorner -119.00013888889
yllcorner 36.999861111111
cellsize 0.00027777777777778
NODATA_value -9999
elevation data W --> E
N
|
V
S
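:Example: (a minimal usage sketch; the DEM path and model center are hypothetical) ::
>>> dem_fn = r"/home/dem/surveyarea.asc"
>>> e_east, e_north, elev = read_dem_ascii(dem_fn, cell_size=500, model_center=(650000., 4500000.))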
"""
dfid = file(ascii_fn, 'r')
d_dict = {}
for ii in range(6):
dline = dfid.readline()
dline = dline.strip().split()
key = dline[0].strip().lower()
value = float(dline[1].strip())
d_dict[key] = value
x0 = d_dict['xllcorner']
y0 = d_dict['yllcorner']
nx = int(d_dict['ncols'])
ny = int(d_dict['nrows'])
cs = d_dict['cellsize']
# read in the elevation data
elevation = np.zeros((nx, ny))
for ii in range(1, int(ny)+2):
dline = dfid.readline()
if len(str(dline)) > 1:
#needs to be backwards because first line is the furthest north row.
elevation[:, -ii] = np.array(dline.strip().split(' '), dtype='float')
else:
break
dfid.close()
# create lat and lon arrays from the dem file
lon = np.arange(x0, x0+cs*(nx), cs)
lat = np.arange(y0, y0+cs*(ny), cs)
# calculate the lower left and upper right corners of the grid in meters
ll_en = mtpy.utils.gis_tools.ll_to_utm(23, lat[0], lon[0])
ur_en = mtpy.utils.gis_tools.ll_to_utm(23, lat[-1], lon[-1])
# estimate cell sizes for each dem measurement
d_east = abs(ll_en[1]-ur_en[1])/nx
d_north = abs(ll_en[2]-ur_en[2])/ny
# calculate the number of new cells according to the given cell size
# if the given cell size and the DEM cell size (cs) are similar, int() could
# make the value 0, hence the max with 1.
num_cells = max([1, int(cell_size/np.mean([d_east, d_north]))])
# make easting and northing arrays in meters corresponding to lat and lon
east = np.arange(ll_en[1], ur_en[1], d_east)
north = np.arange(ll_en[2], ur_en[2], d_north)
#resample the data accordingly
new_east = east[np.arange(0, east.shape[0], num_cells)]
new_north = north[np.arange(0, north.shape[0], num_cells)]
new_x, new_y = np.meshgrid(np.arange(0, east.shape[0], num_cells),
np.arange(0, north.shape[0], num_cells),
indexing='ij')
elevation = elevation[new_x, new_y]
# estimate the shift of the DEM to relative model coordinates
shift_east = new_east.mean()-model_center[0]
shift_north = new_north.mean()-model_center[1]
# shift the easting and northing arrays accordingly so the DEM and model
# are collocated.
new_east = (new_east-new_east.mean())+shift_east
new_north = (new_north-new_north.mean())+shift_north
# need to rotate cause I think I wrote the dem backwards
if rot_90 == 1 or rot_90 == 3:
elevation = np.rot90(elevation, rot_90)
return new_north, new_east, elevation
else:
elevation = np.rot90(elevation, rot_90)
return new_east, new_north, elevation
def interpolate_elevation(elev_east, elev_north, elevation, model_east,
model_north, pad=3):
"""
interpolate the elevation onto the model grid.
Arguments:
---------------
*elev_east* : np.ndarray(num_east_nodes)
easting grid for elevation model
*elev_north* : np.ndarray(num_north_nodes)
northing grid for elevation model
*elevation* : np.ndarray(num_east_nodes, num_north_nodes)
elevation model assumes x is east, y is north
Units are meters
*model_east* : np.ndarray(num_east_nodes_model)
relative easting grid of resistivity model
*model_north* : np.ndarray(num_north_nodes_model)
relative northing grid of resistivity model
*pad* : int
number of cells to repeat the elevation model by. For pad=3
the outer 3 cells of the interpolated elevation model will be
repeats of the adjacent interior cell. This extends the
elevation model to the edges of the resistivity model, since
most elevation models will not cover the entire model area.
Returns:
--------------
*interp_elev* : np.ndarray(num_north_nodes_model, num_east_nodes_model)
the elevation model interpolated onto the resistivity
model grid.
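:Example: (a minimal usage sketch; assumes e_east, e_north, elev from read_dem_ascii and m_obj is a Model instance) ::
>>> interp_elev = interpolate_elevation(e_east, e_north, elev, m_obj.grid_east, m_obj.grid_north, pad=3)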
"""
# need to line up the elevation with the model
grid_east, grid_north = np.broadcast_arrays(elev_east[:, None],
elev_north[None, :])
# interpolate onto the model grid
interp_elev = spi.griddata((grid_east.ravel(), grid_north.ravel()),
elevation.ravel(),
(model_east[:, None],
model_north[None, :]),
method='linear',
fill_value=elevation.mean())
interp_elev[0:pad, pad:-pad] = interp_elev[pad, pad:-pad]
interp_elev[-pad:, pad:-pad] = interp_elev[-pad-1, pad:-pad]
interp_elev[:, 0:pad] = interp_elev[:, pad].repeat(pad).reshape(
interp_elev[:, 0:pad].shape)
interp_elev[:, -pad:] = interp_elev[:, -pad-1].repeat(pad).reshape(
interp_elev[:, -pad:].shape)
# transpose the modeled elevation to align with x=N, y=E
interp_elev = interp_elev.T
return interp_elev
def make_elevation_model(interp_elev, model_nodes_z, elevation_cell=30,
pad=3, res_air=1e12, fill_res=100, res_sea=0.3):
"""
Take the elevation data of the interpolated elevation model and map that
onto the resistivity model by adding elevation cells to the existing model.
..Note: if there are large elevation gains, the elevation cell size
might need to be increased.
Arguments:
-------------
*interp_elev* : np.ndarray(num_nodes_north, num_nodes_east)
elevation model that has been interpolated onto the
resistivity model grid. Units are in meters.
*model_nodes_z* : np.ndarray(num_z_nodes_of_model)
vertical nodes of the resistivity model without
topography. Note these are the nodes given in
relative thickness, not the grid, which is total
depth. Units are meters.
*elevation_cell* : float
height of elevation cells to be added on. These
are assumed to be the same at all elevations.
Units are in meters
*pad* : int
number of cells to look for maximum and minimum elevation.
So if you only want elevations within the survey area,
set pad equal to the number of padding cells of the
resistivity model grid.
*res_air* : float
resistivity of air. Default is 1E12 Ohm-m
*fill_res* : float
resistivity value of subsurface in Ohm-m.
Returns:
-------------
*elevation_model* : np.ndarray(num_north_nodes, num_east_nodes,
num_elev_nodes+num_z_nodes)
Model grid with elevation mapped onto it.
Where anything above the surface will be given the
value of res_air, everything else will be fill_res
*new_nodes_z* : np.ndarray(num_z_nodes+num_elev_nodes)
a new array of vertical nodes, where any nodes smaller
than elevation_cell will be set to elevation_cell.
This can be input into a modem.Model object to
rewrite the model file.
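:Example: (a minimal usage sketch; assumes interp_elev from interpolate_elevation and m_obj is a Model instance) ::
>>> elev_model, new_nodes_z = make_elevation_model(interp_elev, m_obj.nodes_z, elevation_cell=30)
>>> m_obj.nodes_z = new_nodes_z
>>> m_obj.res_model = elev_model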
"""
# calculate the max elevation within survey area
elev_max = interp_elev[pad:-pad, pad:-pad].max()
# need to set sea level to 0 elevation
elev_min = max([0, interp_elev[pad:-pad, pad:-pad].min()])
# scale the interpolated elevations to fit within elev_max, elev_min
interp_elev[np.where(interp_elev > elev_max)] = elev_max
#interp_elev[np.where(interp_elev < elev_min)] = elev_min
# calculate the number of elevation cells needed
num_elev_cells = int((elev_max-elev_min)/elevation_cell)
print 'Number of elevation cells: {0}'.format(num_elev_cells)
# find sea level if it is there
if elev_min < 0:
sea_level_index = num_elev_cells-abs(int((elev_min)/elevation_cell))-1
else:
sea_level_index = num_elev_cells-1
print 'Sea level index is {0}'.format(sea_level_index)
# make an array of just the elevation for the model
# north is first index, east is second, vertical is third
elevation_model = np.ones((interp_elev.shape[0],
interp_elev.shape[1],
num_elev_cells+model_nodes_z.shape[0]))
elevation_model[:, :, :] = fill_res
# fill in elevation model with air values. Remember Z is positive down, so
# the top of the model is the highest point and index 0 is highest
# elevation
for nn in range(interp_elev.shape[0]):
for ee in range(interp_elev.shape[1]):
# need to test for ocean
if interp_elev[nn, ee] < 0:
# fill air from the top down to sea level, then sea water down to the sea floor
elevation_model[nn, ee, 0:sea_level_index] = res_air
dz = sea_level_index+abs(int((interp_elev[nn, ee])/elevation_cell))+1
elevation_model[nn, ee, sea_level_index:dz] = res_sea
else:
dz = int((elev_max-interp_elev[nn, ee])/elevation_cell)
elevation_model[nn, ee, 0:dz] = res_air
# make new z nodes array
new_nodes_z = np.append(np.repeat(elevation_cell, num_elev_cells),
model_nodes_z)
new_nodes_z[np.where(new_nodes_z < elevation_cell)] = elevation_cell
return elevation_model, new_nodes_z
def add_topography_to_model(dem_ascii_fn, model_fn, model_center=(0,0),
rot_90=0, cell_size=500, elev_cell=30):
"""
Add topography to an existing model from a dem in ascii format.
The ascii format is assumed to be:
ncols 3601
nrows 3601
xllcorner -119.00013888889
yllcorner 36.999861111111
cellsize 0.00027777777777778
NODATA_value -9999
elevation data W --> E
N
|
V
S
Arguments:
-------------
*dem_ascii_fn* : string
full path to ascii dem file
*model_fn* : string
full path to existing ModEM model file
*model_center* : (east, north) in meters
Sometimes the center of the DEM and the center of the
model don't line up. Use this parameter to line
everything up properly.
*rot_90* : [ 0 | 1 | 2 | 3 ]
rotate the elevation model by rot_90*90 degrees. Sometimes
the elevation model is flipped depending on your coordinate
system.
*cell_size* : float (meters)
horizontal cell size of grid to interpolate elevation
onto. This should be smaller or equal to the input
model cell size to be sure there is not spatial aliasing
*elev_cell* : float (meters)
vertical size of each elevation cell. This value should
be about 1/10th the smallest skin depth.
Returns:
---------------
*new_model_fn* : string
full path to model file that contains topography
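:Example: (a minimal usage sketch; the file paths are hypothetical) ::
>>> dem_fn = r"/home/dem/surveyarea.asc"
>>> model_fn = r"/home/ModEM/Inv1/Test_model.rho"
>>> add_topography_to_model(dem_fn, model_fn, model_center=(650000., 4500000.), cell_size=500, elev_cell=30)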
"""
### 1.) read in the dem and center it onto the resistivity model
e_east, e_north, elevation = read_dem_ascii(dem_ascii_fn, cell_size=cell_size,
model_center=model_center,
rot_90=rot_90)
m_obj = Model()
m_obj.read_model_file(model_fn)
### 2.) interpolate the elevation model onto the model grid
m_elev = interpolate_elevation(e_east, e_north, elevation,
m_obj.grid_east, m_obj.grid_north, pad=3)
### 3.) make a resistivity model that incorporates topography
mod_elev, elev_nodes_z = make_elevation_model(m_elev, m_obj.nodes_z,
elevation_cell=elev_cell)
### 4.) write new model file
m_obj.nodes_z = elev_nodes_z
m_obj.res_model = mod_elev
m_obj.write_model_file(model_fn_basename='{0}_topo.rho'.format(
os.path.basename(m_obj.model_fn)[0:-4]))
def change_data_elevation(data_fn, model_fn, new_data_fn=None, res_air=1e12):
"""
At each station in the data file rewrite the elevation, so the station is
on the surface, not floating in air.
Arguments:
------------------
*data_fn* : string
full path to a ModEM data file
*model_fn* : string
full path to ModEM model file that has elevation
incorporated.
*new_data_fn* : string
full path to new data file name. If None, then
new file name will add _elev.dat to input filename
*res_air* : float
resistivity of air. Default is 1E12 Ohm-m
Returns:
-------------
*new_data_fn* : string
full path to new data file.
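:Example: (a minimal usage sketch; the file paths are hypothetical) ::
>>> dfn = r"/home/ModEM/Inv1/ModEM_data.dat"
>>> mfn = r"/home/ModEM/Inv1/Test_model_topo.rho"
>>> new_dfn = change_data_elevation(dfn, mfn)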
"""
d_obj = Data()
d_obj.read_data_file(data_fn)
m_obj = Model()
m_obj.read_model_file(model_fn)
for key in d_obj.mt_dict.keys():
mt_obj = d_obj.mt_dict[key]
e_index = np.where(m_obj.grid_east > mt_obj.grid_east)[0][0]
n_index = np.where(m_obj.grid_north > mt_obj.grid_north)[0][0]
z_index = np.where(m_obj.res_model[n_index, e_index, :] < res_air*.9)[0][0]
s_index = np.where(d_obj.data_array['station']==key)[0][0]
d_obj.data_array[s_index]['elev'] = m_obj.grid_z[z_index]
mt_obj.grid_elev = m_obj.grid_z[z_index]
if new_data_fn is None:
new_dfn = '{0}{1}'.format(data_fn[:-4], '_elev.dat')
else:
new_dfn=new_data_fn
d_obj.write_data_file(save_path=os.path.dirname(new_dfn),
fn_basename=os.path.basename(new_dfn),
compute_error=False,
fill=False)
return new_dfn
#==============================================================================
# Manipulate the model to test structures or create a starting model
#==============================================================================
class ModelManipulator(Model):
"""
will plot a model from a ModEM model file so the user can manipulate the
resistivity values relatively easily. At the moment the model is only
plotted in map view.
:Example: ::
>>> model_fn = r"/home/MT/ModEM/Inv1/Test_model.rho"
>>> mm = ModelManipulator(model_fn=model_fn)
=================== =======================================================
Buttons Description
=================== =======================================================
'=' increase depth to next vertical node (deeper)
'-' decrease depth to next vertical node (shallower)
'q' quit the plot, rewrites initial file when pressed
'a' copies the above horizontal layer to the present layer
'b' copies the below horizontal layer to present layer
'u' undo previous change
=================== =======================================================
=================== =======================================================
Attributes Description
=================== =======================================================
ax1 matplotlib.axes instance for mesh plot of the model
ax2 matplotlib.axes instance of colorbar
cb matplotlib.colorbar instance for colorbar
cid_depth matplotlib.canvas.connect for depth
cmap matplotlib.colormap instance
cmax maximum value of resistivity for colorbar. (linear)
cmin minimum value of resistivity for colorbar (linear)
data_fn full path to data file
depth_index integer value of depth slice for plotting
dpi resolution of figure in dots-per-inch
dscale depth scaling, computed internally
east_line_xlist list of east mesh lines for faster plotting
east_line_ylist list of east mesh lines for faster plotting
fdict dictionary of font properties
fig matplotlib.figure instance
fig_num number of figure instance
fig_size size of figure in inches
font_size size of font in points
grid_east location of east nodes in relative coordinates
grid_north location of north nodes in relative coordinates
grid_z location of vertical nodes in relative coordinates
initial_fn full path to initial file
m_height mean height of horizontal cells
m_width mean width of horizontal cells
map_scale [ 'm' | 'km' ] scale of map
mesh_east np.meshgrid of east, north
mesh_north np.meshgrid of east, north
mesh_plot matplotlib.axes.pcolormesh instance
model_fn full path to model file
new_initial_fn full path to new initial file
nodes_east spacing between east nodes
nodes_north spacing between north nodes
nodes_z spacing between vertical nodes
north_line_xlist list of coordinates of north nodes for faster plotting
north_line_ylist list of coordinates of north nodes for faster plotting
plot_yn [ 'y' | 'n' ] plot on instantiation
radio_res matplotlib.widget.radio instance for change resistivity
rect_selector matplotlib.widget.rect_selector
res np.ndarray(nx, ny, nz) for model in linear resistivity
res_copy copy of res for undo
res_dict dictionary of segmented resistivity values
res_list list of resistivity values for model linear scale
res_model np.ndarray(nx, ny, nz) of resistivity values from
res_list (linear scale)
res_model_int np.ndarray(nx, ny, nz) of integer values corresponding
to res_list for initial model
res_value current resistivity value of radio_res
save_path path to save initial file to
station_east station locations in east direction
station_north station locations in north direction
xlimits limits of plot in e-w direction
ylimits limits of plot in n-s direction
=================== =======================================================
"""
def __init__(self, model_fn=None, data_fn=None, **kwargs):
#be sure to initialize Model
Model.__init__(self, model_fn=model_fn, **kwargs)
self.data_fn = data_fn
self.model_fn_basename = kwargs.pop('model_fn_basename',
'ModEM_Model_rw.ws')
if self.model_fn is not None:
self.save_path = os.path.dirname(self.model_fn)
elif self.data_fn is not None:
self.save_path = os.path.dirname(self.data_fn)
else:
self.save_path = os.getcwd()
#station locations in relative coordinates read from data file
self.station_east = None
self.station_north = None
#--> set map scale
self.map_scale = kwargs.pop('map_scale', 'km')
self.m_width = 100
self.m_height = 100
#--> scale the map coordinates
if self.map_scale=='km':
self.dscale = 1000.
if self.map_scale=='m':
self.dscale = 1.
#figure attributes
self.fig = None
self.ax1 = None
self.ax2 = None
self.cb = None
self.east_line_xlist = None
self.east_line_ylist = None
self.north_line_xlist = None
self.north_line_ylist = None
#make a default resistivity list to change values
self._res_sea = 0.3
self._res_air = 1E12
self.res_dict = None
self.res_list = kwargs.pop('res_list', None)
if self.res_list is None:
self.set_res_list(np.array([self._res_sea, 1, 10, 50, 100, 500,
1000, 5000],
dtype=np.float))
#set initial resistivity value
self.res_value = self.res_list[0]
self.cov_arr = None
#--> set map limits
self.xlimits = kwargs.pop('xlimits', None)
self.ylimits = kwargs.pop('ylimits', None)
self.font_size = kwargs.pop('font_size', 7)
self.fig_dpi = kwargs.pop('fig_dpi', 300)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.cmap = kwargs.pop('cmap', cm.jet_r)
self.depth_index = kwargs.pop('depth_index', 0)
self.fdict = {'size':self.font_size+2, 'weight':'bold'}
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .8)
self.subplot_left = kwargs.pop('subplot_left', .01)
self.subplot_top = kwargs.pop('subplot_top', .93)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
#plot on initialization
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn=='y':
self.get_model()
self.plot()
def set_res_list(self, res_list):
"""
on setting res_list also set the res_dict to correspond
"""
self.res_list = res_list
#make a dictionary of values to write to file.
self.res_dict = dict([(res, ii)
for ii, res in enumerate(self.res_list,1)])
if self.fig is not None:
plt.close()
self.plot()
#---read files-------------------------------------------------------------
def get_model(self):
"""
reads in initial file or model file and sets attributes:
-res_model
-grid_north
-grid_east
-grid_z
-res_list if initial file
"""
#--> read in model file
self.read_model_file()
self.cov_arr = np.ones_like(self.res_model)
#--> read in data file if given
if self.data_fn is not None:
md_data = Data()
md_data.read_data_file(self.data_fn)
#get station locations
self.station_east = md_data.station_locations['rel_east']
self.station_north = md_data.station_locations['rel_north']
#get cell block sizes
self.m_height = np.median(self.nodes_north[5:-5])/self.dscale
self.m_width = np.median(self.nodes_east[5:-5])/self.dscale
#make a copy of original in case there are unwanted changes
self.res_copy = self.res_model.copy()
#---plot model-------------------------------------------------------------
def plot(self):
"""
plots the model with:
-a radio dial for depth slice
-radio dial for resistivity value
"""
# set plot properties
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = self.subplot_left
plt.rcParams['figure.subplot.right'] = self.subplot_right
plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
plt.rcParams['figure.subplot.top'] = self.subplot_top
font_dict = {'size':self.font_size+2, 'weight':'bold'}
#make sure there is a model to plot
if self.res_model is None:
self.get_model()
self.cmin = np.floor(np.log10(min(self.res_list)))
self.cmax = np.ceil(np.log10(max(self.res_list)))
#-->Plot properties
plt.rcParams['font.size'] = self.font_size
#need to add an extra row and column to east and north to make sure
#all is plotted see pcolor for details.
plot_east = np.append(self.grid_east, self.grid_east[-1]*1.25)/self.dscale
plot_north = np.append(self.grid_north, self.grid_north[-1]*1.25)/self.dscale
#make a mesh grid for plotting
#the 'ij' makes sure the resulting grid is in east, north
self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
plot_north,
indexing='ij')
self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
plt.clf()
self.ax1 = self.fig.add_subplot(1, 1, 1, aspect='equal')
#transpose to make x--east and y--north
plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.cmin,
vmax=self.cmax)
#on plus or minus change depth slice
self.cid_depth = \
self.mesh_plot.figure.canvas.mpl_connect('key_press_event',
self._on_key_callback)
#plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax1.text(ee/self.dscale, nn/self.dscale,
'*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':self.font_size-2,
'weight':'bold'})
#set axis properties
if self.xlimits is not None:
self.ax1.set_xlim(self.xlimits)
else:
self.ax1.set_xlim(xmin=self.grid_east.min()/self.dscale,
xmax=self.grid_east.max()/self.dscale)
if self.ylimits is not None:
self.ax1.set_ylim(self.ylimits)
else:
self.ax1.set_ylim(ymin=self.grid_north.min()/self.dscale,
ymax=self.grid_north.max()/self.dscale)
#self.ax1.xaxis.set_minor_locator(MultipleLocator(100*1./dscale))
#self.ax1.yaxis.set_minor_locator(MultipleLocator(100*1./dscale))
self.ax1.set_ylabel('Northing ('+self.map_scale+')',
fontdict=self.fdict)
self.ax1.set_xlabel('Easting ('+self.map_scale+')',
fontdict=self.fdict)
depth_title = self.grid_z[self.depth_index]/self.dscale
self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
'('+self.map_scale+')',
fontdict=self.fdict)
#plot the grid if desired
self.east_line_xlist = []
self.east_line_ylist = []
for xx in self.grid_east:
self.east_line_xlist.extend([xx/self.dscale, xx/self.dscale])
self.east_line_xlist.append(None)
self.east_line_ylist.extend([self.grid_north.min()/self.dscale,
self.grid_north.max()/self.dscale])
self.east_line_ylist.append(None)
self.ax1.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.north_line_xlist = []
self.north_line_ylist = []
for yy in self.grid_north:
self.north_line_xlist.extend([self.grid_east.min()/self.dscale,
self.grid_east.max()/self.dscale])
self.north_line_xlist.append(None)
self.north_line_ylist.extend([yy/self.dscale, yy/self.dscale])
self.north_line_ylist.append(None)
self.ax1.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#plot the colorbar
# self.ax2 = mcb.make_axes(self.ax1, orientation='vertical', shrink=.35)
self.ax2 = self.fig.add_axes([.81, .45, .16, .03])
self.ax2.xaxis.set_ticks_position('top')
#seg_cmap = ws.cmap_discretize(self.cmap, len(self.res_list))
self.cb = mcb.ColorbarBase(self.ax2,cmap=self.cmap,
norm=colors.Normalize(vmin=self.cmin,
vmax=self.cmax),
orientation='horizontal')
self.cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size})
self.cb.set_ticks(np.arange(self.cmin, self.cmax+1))
self.cb.set_ticklabels([mtplottools.labeldict[cc]
for cc in np.arange(self.cmin, self.cmax+1)])
#make a resistivity radio button
#resrb = self.fig.add_axes([.85,.1,.1,.2])
#reslabels = ['{0:.4g}'.format(res) for res in self.res_list]
#self.radio_res = widgets.RadioButtons(resrb, reslabels,
# active=self.res_dict[self.res_value])
# slider_ax_bounds = list(self.cb.ax.get_position().bounds)
# slider_ax_bounds[0] += .1
slider_ax = self.fig.add_axes([.81, .5, .16, .03])
self.slider_res = widgets.Slider(slider_ax, 'Resistivity',
self.cmin, self.cmax,
valinit=2)
#make a rectangular selector
self.rect_selector = widgets.RectangleSelector(self.ax1,
self.rect_onselect,
drawtype='box',
useblit=True)
plt.show()
#needs to go after show()
self.slider_res.on_changed(self.set_res_value)
#self.radio_res.on_clicked(self.set_res_value)
def redraw_plot(self):
"""
redraws the plot
"""
current_xlimits = self.ax1.get_xlim()
current_ylimits = self.ax1.get_ylim()
self.ax1.cla()
plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.cmin,
vmax=self.cmax)
#plot the stations
if self.station_east is not None:
for ee,nn in zip(self.station_east, self.station_north):
self.ax1.text(ee/self.dscale, nn/self.dscale,
'*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':self.font_size-2,
'weight':'bold'})
#set axis properties
if self.xlimits is not None:
self.ax1.set_xlim(self.xlimits)
else:
self.ax1.set_xlim(current_xlimits)
if self.ylimits is not None:
self.ax1.set_ylim(self.ylimits)
else:
self.ax1.set_ylim(current_ylimits)
self.ax1.set_ylabel('Northing ('+self.map_scale+')',
fontdict=self.fdict)
self.ax1.set_xlabel('Easting ('+self.map_scale+')',
fontdict=self.fdict)
depth_title = self.grid_z[self.depth_index]/self.dscale
self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
'('+self.map_scale+')',
fontdict=self.fdict)
#plot finite element mesh
self.ax1.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.ax1.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#be sure to redraw the canvas
self.fig.canvas.draw()
# def set_res_value(self, label):
# self.res_value = float(label)
# print 'set resistivity to ', label
# print self.res_value
def set_res_value(self, val):
self.res_value = 10**val
print 'set resistivity to ', self.res_value
def _on_key_callback(self,event):
"""
on pressing a key do something
"""
self.event_change_depth = event
#go down a layer on push of +/= keys
if self.event_change_depth.key == '=':
self.depth_index += 1
if self.depth_index>len(self.grid_z)-1:
self.depth_index = len(self.grid_z)-1
print 'already at deepest depth'
print 'Plotting Depth {0:.3f}'.format(self.grid_z[self.depth_index]/\
self.dscale)+'('+self.map_scale+')'
self.redraw_plot()
#go up a layer on push of - key
elif self.event_change_depth.key == '-':
self.depth_index -= 1
if self.depth_index < 0:
self.depth_index = 0
print 'Plotting Depth {0:.3f} '.format(self.grid_z[self.depth_index]/\
self.dscale)+'('+self.map_scale+')'
self.redraw_plot()
#exit plot on press of q
elif self.event_change_depth.key == 'q':
self.event_change_depth.canvas.mpl_disconnect(self.cid_depth)
plt.close(self.event_change_depth.canvas.figure)
self.rewrite_model_file()
#copy the layer above
elif self.event_change_depth.key == 'a':
try:
if self.depth_index == 0:
print 'No layers above'
else:
self.res_model[:, :, self.depth_index] = \
self.res_model[:, :, self.depth_index-1]
except IndexError:
print 'No layers above'
self.redraw_plot()
#copy the layer below
elif self.event_change_depth.key == 'b':
try:
self.res_model[:, :, self.depth_index] = \
self.res_model[:, :, self.depth_index+1]
except IndexError:
print 'No more layers below'
self.redraw_plot()
#undo
elif self.event_change_depth.key == 'u':
if type(self.xchange) is int and type(self.ychange) is int:
self.res_model[self.ychange, self.xchange, self.depth_index] =\
self.res_copy[self.ychange, self.xchange, self.depth_index]
else:
for xx in self.xchange:
for yy in self.ychange:
self.res_model[yy, xx, self.depth_index] = \
self.res_copy[yy, xx, self.depth_index]
self.redraw_plot()
def change_model_res(self, xchange, ychange):
"""
change resistivity values of resistivity model
"""
if type(xchange) is int and type(ychange) is int:
self.res_model[ychange, xchange, self.depth_index] = self.res_value
else:
for xx in xchange:
for yy in ychange:
self.res_model[yy, xx, self.depth_index] = self.res_value
self.redraw_plot()
def rect_onselect(self, eclick, erelease):
"""
on selecting a rectangle change the colors to the resistivity values
"""
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
self.xchange = self._get_east_index(x1, x2)
self.ychange = self._get_north_index(y1, y2)
#reset values of resistivity
self.change_model_res(self.xchange, self.ychange)
def _get_east_index(self, x1, x2):
"""
get the index value of the points to be changed
"""
if x1 < x2:
xchange = np.where((self.grid_east/self.dscale >= x1) & \
(self.grid_east/self.dscale <= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x1)[0][0]-1
return [xchange]
if x1 > x2:
xchange = np.where((self.grid_east/self.dscale <= x1) & \
(self.grid_east/self.dscale >= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x2)[0][0]-1
return [xchange]
#check the edges to see if the selection should include the square
xchange = np.append(xchange, xchange[0]-1)
xchange.sort()
return xchange
def _get_north_index(self, y1, y2):
"""
get the index value of the points to be changed in north direction
need to flip the index because the plot is flipped
"""
if y1 < y2:
ychange = np.where((self.grid_north/self.dscale > y1) & \
(self.grid_north/self.dscale < y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y1)[0][0]-1
return [ychange]
elif y1 > y2:
ychange = np.where((self.grid_north/self.dscale < y1) & \
(self.grid_north/self.dscale > y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y2)[0][0]-1
return [ychange]
ychange -= 1
ychange = np.append(ychange, ychange[-1]+1)
return ychange
def rewrite_model_file(self, model_fn=None, save_path=None,
model_fn_basename=None):
"""
write a new ModEM model file from the edited resistivity model.
"""
if save_path is not None:
self.save_path = save_path
self.model_fn = model_fn
if model_fn_basename is not None:
self.model_fn_basename = model_fn_basename
self.write_model_file()
#==============================================================================
# plot response
#==============================================================================
class moved_PlotResponse(object):
"""
plot data and response
Plots the real and imaginary impedance and induction vector if present.
:Example: ::
>>> import mtpy.modeling.new_modem as modem
>>> dfn = r"/home/MT/ModEM/Inv1/DataFile.dat"
>>> rfn = r"/home/MT/ModEM/Inv1/Test_resp_000.dat"
>>> mrp = modem.PlotResponse(data_fn=dfn, resp_fn=rfn)
>>> # plot only the TE and TM modes
>>> mrp.plot_component = 2
>>> mrp.redraw_plot()
======================== ==================================================
Attributes Description
======================== ==================================================
color_mode [ 'color' | 'bw' ] color or black and white plots
cted color for data TE mode
ctem color for model TE mode
ctmd color for data TM mode
ctmm color for model TM mode
data_fn full path to data file
data_object WSResponse instance
e_capsize cap size of error bars in points (*default* is 2)
e_capthick cap thickness of error bars in points (*default*
is .5)
fig_dpi resolution of figure in dots-per-inch (300)
fig_list list of matplotlib.figure instances for plots
fig_size size of figure in inches (*default* is [6, 6])
font_size size of font for tick labels, axes labels are
font_size+2 (*default* is 7)
legend_border_axes_pad padding between legend box and axes
legend_border_pad padding between border of legend and symbols
legend_handle_text_pad padding between text labels and symbols of legend
legend_label_spacing padding between labels
legend_loc location of legend
legend_marker_scale scale of symbols in legend
lw line width response curves (*default* is .5)
ms size of markers (*default* is 1.5)
mted marker for data TE mode
mtem marker for model TE mode
mtmd marker for data TM mode
mtmm marker for model TM mode
phase_limits limits of phase
plot_component [ 2 | 4 ] 2 for TE and TM or 4 for all components
plot_style [ 1 | 2 ] 1 to plot each mode in a separate
subplot and 2 to plot xx, xy and yx, yy in same
plots
plot_type [ '1' | list of station name ] '1' to plot all
stations in data file or input a list of station
names to plot if station_fn is input, otherwise
input a list of integers associated with the
index within the data file, i.e. 2 for the 2nd station
plot_z [ True | False ] *default* is True to plot
impedance, False for plotting resistivity and
phase
plot_yn [ 'n' | 'y' ] to plot on instantiation
res_limits limits of resistivity in linear scale
resp_fn full path to response file
resp_object WSResponse object for resp_fn, or list of
WSResponse objects if resp_fn is a list of
response files
station_fn full path to station file written by WSStation
subplot_bottom space between axes and bottom of figure
subplot_hspace space between subplots in vertical direction
subplot_left space between axes and left of figure
subplot_right space between axes and right of figure
subplot_top space between axes and top of figure
subplot_wspace space between subplots in horizontal direction
======================== ==================================================
"""
def __init__(self, data_fn=None, resp_fn=None, **kwargs):
self.data_fn = data_fn
self.resp_fn = resp_fn
self.data_object = None
self.resp_object = []
self.color_mode = kwargs.pop('color_mode', 'color')
self.ms = kwargs.pop('ms', 1.5)
self.ms_r = kwargs.pop('ms_r', 3)
self.lw = kwargs.pop('lw', .5)
self.lw_r = kwargs.pop('lw_r', 1.0)
self.e_capthick = kwargs.pop('e_capthick', .5)
self.e_capsize = kwargs.pop('e_capsize', 2)
#color mode
if self.color_mode == 'color':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 1))
self.ctmd = kwargs.pop('ctmd', (1, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0, .6, .3))
self.ctmm = kwargs.pop('ctmm', (.9, 0, .8))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', '+')
#black and white mode
elif self.color_mode == 'bw':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 0))
self.ctmd = kwargs.pop('ctmd', (0, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0.6, 0.6, 0.6))
self.ctmm = kwargs.pop('ctmm', (0.6, 0.6, 0.6))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', 'x')
self.phase_limits_d = kwargs.pop('phase_limits_d', None)
self.phase_limits_od = kwargs.pop('phase_limits_od', None)
self.res_limits_d = kwargs.pop('res_limits_d', None)
self.res_limits_od = kwargs.pop('res_limits_od', None)
self.tipper_limits = kwargs.pop('tipper_limits', None)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .98)
self.subplot_left = kwargs.pop('subplot_left', .08)
self.subplot_top = kwargs.pop('subplot_top', .85)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
self.legend_loc = 'upper center'
self.legend_pos = (.5, 1.18)
self.legend_marker_scale = 1
self.legend_border_axes_pad = .01
self.legend_label_spacing = 0.07
self.legend_handle_text_pad = .2
self.legend_border_pad = .15
self.font_size = kwargs.pop('font_size', 6)
self.plot_type = kwargs.pop('plot_type', '1')
self.plot_style = kwargs.pop('plot_style', 1)
self.plot_component = kwargs.pop('plot_component', 4)
self.plot_yn = kwargs.pop('plot_yn', 'y')
self.plot_z = kwargs.pop('plot_z', True)
self.ylabel_pad = kwargs.pop('ylabel_pad', 1.25)
self.fig_list = []
if self.plot_yn == 'y':
self.plot()
def plot(self):
"""
plot
"""
self.data_object = Data()
self.data_object.read_data_file(self.data_fn)
#get shape of impedance tensors
ns = len(self.data_object.mt_dict.keys())
#read in response files
if self.resp_fn != None:
self.resp_object = []
if type(self.resp_fn) is not list:
resp_obj = Data()
resp_obj.read_data_file(self.resp_fn)
self.resp_object = [resp_obj]
else:
for rfile in self.resp_fn:
resp_obj = Data()
resp_obj.read_data_file(rfile)
self.resp_object.append(resp_obj)
#get number of response files
nr = len(self.resp_object)
if type(self.plot_type) is list:
ns = len(self.plot_type)
#--> set default font size
plt.rcParams['font.size'] = self.font_size
fontdict = {'size':self.font_size+2, 'weight':'bold'}
if self.plot_z == True:
h_ratio = [1, 1, .5]
elif self.plot_z == False:
h_ratio = [1.5, 1, .5]
ax_list = []
line_list = []
label_list = []
#--> make key word dictionaries for plotting
kw_xx = {'color':self.cted,
'marker':self.mted,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':self.ctmd,
'marker':self.mtmd,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
if self.plot_type != '1':
pstation_list = []
if type(self.plot_type) is not list:
self.plot_type = [self.plot_type]
for ii, station in enumerate(self.data_object.mt_dict.keys()):
if type(station) is not int:
for pstation in self.plot_type:
if station.find(str(pstation)) >= 0:
pstation_list.append(station)
else:
for pstation in self.plot_type:
if station == int(pstation):
pstation_list.append(ii)
else:
pstation_list = self.data_object.mt_dict.keys()
for jj, station in enumerate(pstation_list):
z_obj = self.data_object.mt_dict[station].Z
t_obj = self.data_object.mt_dict[station].Tipper
period = self.data_object.period_list
print 'Plotting: {0}'.format(station)
#convert to apparent resistivity and phase
z_obj._compute_res_phase()
#find locations where points have been masked
nzxx = np.nonzero(z_obj.z[:, 0, 0])[0]
nzxy = np.nonzero(z_obj.z[:, 0, 1])[0]
nzyx = np.nonzero(z_obj.z[:, 1, 0])[0]
nzyy = np.nonzero(z_obj.z[:, 1, 1])[0]
ntx = np.nonzero(t_obj.tipper[:, 0, 0])[0]
nty = np.nonzero(t_obj.tipper[:, 0, 1])[0]
#convert to apparent resistivity and phase
if self.plot_z == True:
scaling = np.zeros_like(z_obj.z)
for ii in range(2):
for jj in range(2):
scaling[:, ii, jj] = 1./np.sqrt(z_obj.freq)
plot_res = abs(z_obj.z.real*scaling)
plot_res_err = abs(z_obj.z_err*scaling)
plot_phase = abs(z_obj.z.imag*scaling)
plot_phase_err = abs(z_obj.z_err*scaling)
h_ratio = [1, 1, .5]
elif self.plot_z == False:
plot_res = z_obj.resistivity
plot_res_err = z_obj.resistivity_err
plot_phase = z_obj.phase
plot_phase_err = z_obj.phase_err
h_ratio = [1.5, 1, .5]
try:
self.res_limits_d = (10**(np.floor(np.log10(min([plot_res[nzxx, 0, 0].min(),
plot_res[nzyy, 1, 1].min()])))),
10**(np.ceil(np.log10(max([plot_res[nzxx, 0, 0].max(),
plot_res[nzyy, 1, 1].max()])))))
except ValueError:
self.res_limits_d = None
try:
self.res_limits_od = (10**(np.floor(np.log10(min([plot_res[nzxy, 0, 1].min(),
plot_res[nzyx, 1, 0].min()])))),
10**(np.ceil(np.log10(max([plot_res[nzxy, 0, 1].max(),
plot_res[nzyx, 1, 0].max()])))))
except ValueError:
self.res_limits_od = None
#make figure
fig = plt.figure(station, self.fig_size, dpi=self.fig_dpi)
plt.clf()
fig.suptitle(str(station), fontdict=fontdict)
#set the grid of subplots
if np.all(t_obj.tipper == 0.0) == True:
self.plot_tipper = False
else:
self.plot_tipper = True
self.tipper_limits = (np.round(min([t_obj.tipper[ntx, 0, 0].real.min(),
t_obj.tipper[nty, 0, 1].real.min(),
t_obj.tipper[ntx, 0, 0].imag.min(),
t_obj.tipper[nty, 0, 1].imag.min()]),
1),
np.round(max([t_obj.tipper[ntx, 0, 0].real.max(),
t_obj.tipper[nty, 0, 1].real.max(),
t_obj.tipper[ntx, 0, 0].imag.max(),
t_obj.tipper[nty, 0, 1].imag.max()]),
1))
gs = gridspec.GridSpec(3, 4,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx, sharey=axrxy)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx, sharey=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
axtxr = fig.add_subplot(gs[2, 0], sharex=axrxx)
axtxi = fig.add_subplot(gs[2, 1], sharex=axrxx, sharey=axtxr)
axtyr = fig.add_subplot(gs[2, 2], sharex=axrxx)
axtyi = fig.add_subplot(gs[2, 3], sharex=axrxx, sharey=axtyr)
self.ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy,
axtxr, axtxi, axtyr, axtyi]
#---------plot the apparent resistivity-----------------------------------
#plot each component in its own subplot
# plot data response
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
plot_res[nzxx, 0, 0],
plot_res_err[nzxx, 0, 0],
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
plot_res[nzxy, 0, 1],
plot_res_err[nzxy, 0, 1],
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
plot_res[nzyx, 1, 0],
plot_res_err[nzyx, 1, 0],
**kw_yy)
eryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
plot_res[nzyy, 1, 1],
plot_res_err[nzyy, 1, 1],
**kw_yy)
#plot phase
epxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
plot_phase[nzxx, 0, 0],
plot_phase_err[nzxx, 0, 0],
**kw_xx)
epxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
plot_phase[nzxy, 0, 1],
plot_phase_err[nzxy, 0, 1],
**kw_xx)
epyx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
plot_phase[nzyx, 1, 0],
plot_phase_err[nzyx, 1, 0],
**kw_yy)
epyy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
plot_phase[nzyy, 1, 1],
plot_phase_err[nzyy, 1, 1],
**kw_yy)
#plot tipper
if self.plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
t_obj.tipper[ntx, 0, 0].real,
t_obj.tipper_err[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtyr,
period[nty],
t_obj.tipper[nty, 0, 1].real,
t_obj.tipper_err[nty, 0, 1],
**kw_yy)
eptx = mtplottools.plot_errorbar(axtxi,
period[ntx],
t_obj.tipper[ntx, 0, 0].imag,
t_obj.tipper_err[ntx, 0, 0],
**kw_xx)
epty = mtplottools.plot_errorbar(axtyi,
period[nty],
t_obj.tipper[nty, 0, 1].imag,
t_obj.tipper_err[nty, 0, 1],
**kw_yy)
#----------------------------------------------
# get error bar list for editing later
if self.plot_tipper == False:
try:
self._err_list = [[erxx[1][0], erxx[1][1], erxx[2][0]],
[erxy[1][0], erxy[1][1], erxy[2][0]],
[eryx[1][0], eryx[1][1], eryx[2][0]],
[eryy[1][0], eryy[1][1], eryy[2][0]]]
line_list = [[erxx[0]], [erxy[0]], [eryx[0]], [eryy[0]]]
except IndexError:
print 'Found no Z components for {0}'.format(station)
line_list = [[None], [None],
[None], [None]]
self._err_list = [[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None]]
else:
try:
line_list = [[erxx[0]], [erxy[0]],
[eryx[0]], [eryy[0]],
[ertx[0]], [erty[0]]]
self._err_list = [[erxx[1][0], erxx[1][1], erxx[2][0]],
[erxy[1][0], erxy[1][1], erxy[2][0]],
[eryx[1][0], eryx[1][1], eryx[2][0]],
[eryy[1][0], eryy[1][1], eryy[2][0]],
[ertx[1][0], ertx[1][1], ertx[2][0]],
[erty[1][0], erty[1][1], erty[2][0]]]
except IndexError:
print 'Found no Z components for {0}'.format(station)
line_list = [[None], [None],
[None], [None],
[None], [None]]
self._err_list = [[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None]]
#------------------------------------------
# make things look nice
# set titles of the Z components
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$']]
for ax, label in zip(self.ax_list[0:4], label_list):
ax.set_title(label[0],fontdict={'size':self.font_size+2,
'weight':'bold'})
# set legends for tipper components
# fake a line
l1 = plt.Line2D([0], [0], linewidth=0, color='w', linestyle='None',
marker='.')
t_label_list = ['Re{$T_x$}', 'Im{$T_x$}', 'Re{$T_y$}', 'Im{$T_y$}']
label_list += [['$T_{x}$'], ['$T_{y}$']]
for ax, label in zip(self.ax_list[-4:], t_label_list):
ax.legend([l1], [label], loc='upper left',
markerscale=.01,
borderaxespad=.05,
labelspacing=.01,
handletextpad=.05,
borderpad=.05,
prop={'size':max([self.font_size, 6])})
#set axis properties
for aa, ax in enumerate(self.ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
if aa < 8:
# ylabels[-1] = ''
# ylabels[0] = ''
# ax.set_yticklabels(ylabels)
# plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == True:
ax.set_yscale('log')
else:
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa < 4 and self.plot_z is False:
ax.set_yscale('log')
if aa == 0 or aa == 3:
ax.set_ylim(self.res_limits_d)
elif aa == 1 or aa == 2:
ax.set_ylim(self.res_limits_od)
if aa > 3 and aa < 8 and self.plot_z is False:
ax.yaxis.set_major_locator(MultipleLocator(10))
if self.phase_limits_d is not None:
ax.set_ylim(self.phase_limits_d)
#set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 4:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 8:
ax.set_ylabel('Tipper',
fontdict=fontdict)
if aa > 7:
ax.yaxis.set_major_locator(MultipleLocator(.1))
if self.tipper_limits is not None:
ax.set_ylim(self.tipper_limits)
else:
pass
ax.set_xscale('log')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0])))*1.01,
xmax=10**(np.ceil(np.log10(period[-1])))*.99)
ax.grid(True, alpha=.25)
ylabels = ax.get_yticks().tolist()
if aa < 8:
ylabels[-1] = ''
ylabels[0] = ''
ax.set_yticklabels(ylabels)
plt.setp(ax.get_xticklabels(), visible=False)
##----------------------------------------------
#plot model response
if self.resp_object is not None:
for resp_obj in self.resp_object:
resp_z_obj = resp_obj.mt_dict[station].Z
resp_z_err = np.nan_to_num((z_obj.z-resp_z_obj.z)/z_obj.z_err)
resp_z_obj._compute_res_phase()
resp_t_obj = resp_obj.mt_dict[station].Tipper
resp_t_err = np.nan_to_num((t_obj.tipper-resp_t_obj.tipper)/t_obj.tipper_err)
#convert to apparent resistivity and phase
if self.plot_z == True:
scaling = np.zeros_like(resp_z_obj.z)
for ii in range(2):
for jj in range(2):
scaling[:, ii, jj] = 1./np.sqrt(resp_z_obj.freq)
r_plot_res = abs(resp_z_obj.z.real*scaling)
r_plot_phase = abs(resp_z_obj.z.imag*scaling)
elif self.plot_z == False:
r_plot_res = resp_z_obj.resistivity
r_plot_phase = resp_z_obj.phase
rms_xx = resp_z_err[:, 0, 0].std()
rms_xy = resp_z_err[:, 0, 1].std()
rms_yx = resp_z_err[:, 1, 0].std()
rms_yy = resp_z_err[:, 1, 1].std()
#--> make key word dictionaries for plotting
kw_xx = {'color':self.ctem,
'marker':self.mtem,
'ms':self.ms_r,
'ls':':',
'lw':self.lw_r,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':self.ctmm,
'marker':self.mtmm,
'ms':self.ms_r,
'ls':':',
'lw':self.lw_r,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
# plot data response
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
r_plot_res[nzxx, 0, 0],
None,
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
r_plot_res[nzxy, 0, 1],
None,
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
r_plot_res[nzyx, 1, 0],
None,
**kw_yy)
reryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
r_plot_res[nzyy, 1, 1],
None,
**kw_yy)
#plot phase
repxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
r_plot_phase[nzxx, 0, 0],
None,
**kw_xx)
repxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
r_plot_phase[nzxy, 0, 1],
None,
**kw_xx)
repyx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
r_plot_phase[nzyx, 1, 0],
None,
**kw_yy)
repyy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
r_plot_phase[nzyy, 1, 1],
None,
**kw_yy)
#plot tipper
if self.plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].real,
None,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtyr,
period[nty],
resp_t_obj.tipper[nty, 0, 1].real,
None,
**kw_yy)
reptx = mtplottools.plot_errorbar(axtxi,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].imag,
None,
**kw_xx)
repty = mtplottools.plot_errorbar(axtyi,
period[nty],
resp_t_obj.tipper[nty, 0, 1].imag,
None,
**kw_yy)
if self.plot_tipper == False:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
else:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
line_list[4] += [rertx[0]]
line_list[5] += [rerty[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
label_list[4] += ['$T^m_{x}$ '+
'rms={0:.2f}'.format(resp_t_err[:, 0, 0].std())]
                    label_list[5] += ['$T^m_{y}$ '+
'rms={0:.2f}'.format(resp_t_err[:, 0, 1].std())]
legend_ax_list = self.ax_list[0:4]
# if self.plot_tipper == True:
# legend_ax_list += [self.ax_list[-4], self.ax_list[-2]]
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size, 5])})
plt.show()
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_fn, file_format='pdf', orientation='portrait',
fig_dpi=None, close_fig='y'):
"""
        save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
                      * full path -> file will be saved to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
        **close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
fig = plt.gcf()
if fig_dpi == None:
fig_dpi = self.fig_dpi
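        # if save_fn is not an existing directory it is treated as a full file
        # path and the format is taken from the last three characters of the name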
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_L2.'+
file_format)
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
def update_plot(self):
"""
update any parameters that where changed using the built-in draw from
canvas.
        Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
self.fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots data vs model response computed by WS3DINV")
#==============================================================================
# plot phase tensors
#==============================================================================
class moved_PlotPTMaps(mtplottools.MTEllipse):
"""
Plot phase tensor maps including residual pt if response file is input.
:Plot only data for one period: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, plot_period_list=[0])
:Plot data and model response: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> rfn = r"/home/MT/ws3dinv/Inv1/Test_resp.00"
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, resp_fn=rfn, model_fn=mfn,
>>> ... plot_period_list=[0])
>>> # adjust colorbar
>>> ptm.cb_res_pad = 1.25
>>> ptm.redraw_plot()
========================== ================================================
Attributes Description
========================== ================================================
cb_pt_pad percentage from top of axes to place pt
color bar. *default* is .90
cb_res_pad percentage from bottom of axes to place
resistivity color bar. *default* is 1.2
cb_residual_tick_step tick step for residual pt. *default* is 3
cb_tick_step tick step for phase tensor color bar,
*default* is 45
data np.ndarray(n_station, n_periods, 2, 2)
impedance tensors for station data
    data_fn                    full path to data file
dscale scaling parameter depending on map_scale
ellipse_cmap color map for pt ellipses. *default* is
mt_bl2gr2rd
ellipse_colorby [ 'skew' | 'skew_seg' | 'phimin' | 'phimax'|
'phidet' | 'ellipticity' ] parameter to color
ellipses by. *default* is 'phimin'
ellipse_range (min, max, step) min and max of colormap, need
to input step if plotting skew_seg
ellipse_size relative size of ellipses in map_scale
ew_limits limits of plot in e-w direction in map_scale
units. *default* is None, scales to station
area
fig_aspect aspect of figure. *default* is 1
fig_dpi resolution in dots-per-inch. *default* is 300
fig_list list of matplotlib.figure instances for each
figure plotted.
fig_size [width, height] in inches of figure window
*default* is [6, 6]
font_size font size of ticklabels, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
model_fn full path to initial file
map_scale [ 'km' | 'm' ] distance units of map.
*default* is km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
    nodes_east                 relative distance between nodes in e-w direction
                               in map_scale units
    nodes_north                relative distance between nodes in n-s direction
                               in map_scale units
    nodes_z                    relative distance between nodes in z direction
                               in map_scale units
ns_limits (min, max) limits of plot in n-s direction
*default* is None, viewing area is station area
pad_east padding from extreme stations in east direction
pad_north padding from extreme stations in north direction
period_list list of periods from data
plot_grid [ 'y' | 'n' ] 'y' to plot grid lines
*default* is 'n'
plot_period_list list of period index values to plot
*default* is None
plot_yn ['y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
    res_cmap                   colormap for resistivity values.
*default* is 'jet_r'
res_limits (min, max) resistivity limits in log scale
*default* is (0, 4)
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
residual_cmap color map for pt residuals.
*default* is 'mt_wh2or'
resp np.ndarray(n_stations, n_periods, 2, 2)
impedance tensors for model response
resp_fn full path to response file
save_path directory to save figures to
save_plots [ 'y' | 'n' ] 'y' to save plots to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
    title                      title of plot *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
========================== ================================================
"""
def __init__(self, data_fn=None, resp_fn=None, model_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.resp_fn = resp_fn
self.save_path = kwargs.pop('save_path', None)
        if self.model_fn is not None and self.save_path is None:
            self.save_path = os.path.dirname(self.model_fn)
if self.save_path is not None:
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.save_plots = kwargs.pop('save_plots', 'y')
self.plot_period_list = kwargs.pop('plot_period_list', None)
self.period_dict = None
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale == 'km':
self.dscale = 1000.
elif self.map_scale == 'm':
self.dscale = 1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.pad_east = kwargs.pop('pad_east', 2000)
self.pad_north = kwargs.pop('pad_north', 2000)
self.plot_grid = kwargs.pop('plot_grid', 'n')
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.fig_list = []
self.xminorticks = kwargs.pop('xminorticks', 1000)
self.yminorticks = kwargs.pop('yminorticks', 1000)
self.residual_cmap = kwargs.pop('residual_cmap', 'mt_wh2or')
self.font_size = kwargs.pop('font_size', 7)
self.cb_tick_step = kwargs.pop('cb_tick_step', 45)
self.cb_residual_tick_step = kwargs.pop('cb_residual_tick_step', 3)
self.cb_pt_pad = kwargs.pop('cb_pt_pad', 1.2)
self.cb_res_pad = kwargs.pop('cb_res_pad', .5)
self.res_limits = kwargs.pop('res_limits', (0,4))
self.res_cmap = kwargs.pop('res_cmap', 'jet_r')
#--> set the ellipse properties -------------------
self._ellipse_dict = kwargs.pop('ellipse_dict', {'size':2})
self._read_ellipse_dict()
self.subplot_right = .99
self.subplot_left = .085
self.subplot_top = .92
self.subplot_bottom = .1
self.subplot_hspace = .2
self.subplot_wspace = .05
self.data_obj = None
self.resp_obj = None
self.model_obj = None
self.period_list = None
self.pt_data_arr = None
self.pt_resp_arr = None
self.pt_resid_arr = None
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def _read_files(self):
"""
get information from files
"""
#--> read in data file
self.data_obj = Data()
self.data_obj.read_data_file(self.data_fn)
#--> read response file
if self.resp_fn is not None:
self.resp_obj = Data()
self.resp_obj.read_data_file(self.resp_fn)
        #--> read model file
if self.model_fn is not None:
self.model_obj = Model()
self.model_obj.read_model_file(self.model_fn)
self._get_plot_period_list()
self._get_pt()
def _get_plot_period_list(self):
"""
get periods to plot from input or data file
"""
#--> get period list to plot
if self.plot_period_list is None:
self.plot_period_list = self.data_obj.period_list
else:
if type(self.plot_period_list) is list:
#check if entries are index values or actual periods
if type(self.plot_period_list[0]) is int:
self.plot_period_list = [self.data_obj.period_list[ii]
for ii in self.plot_period_list]
else:
pass
elif type(self.plot_period_list) is int:
            self.plot_period_list = [self.data_obj.period_list[self.plot_period_list]]
elif type(self.plot_period_list) is float:
self.plot_period_list = [self.plot_period_list]
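        # map each period value to its index in the data period list so a plotted
        # period can be matched back to the corresponding slice of the pt arrays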
self.period_dict = dict([(key, value) for value, key in
enumerate(self.data_obj.period_list)])
def _get_pt(self):
"""
put pt parameters into something useful for plotting
"""
ns = len(self.data_obj.mt_dict.keys())
nf = len(self.data_obj.period_list)
data_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float)])
if self.resp_fn is not None:
model_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float)])
res_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float),
('geometric_mean', np.float)])
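        # the structured arrays hold ellipse parameters (phimin, phimax, skew,
        # azimuth) plus station location for every period and station; separate
        # arrays are filled for the data, the model response and the residual pt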
for ii, key in enumerate(self.data_obj.mt_dict.keys()):
east = self.data_obj.mt_dict[key].grid_east/self.dscale
north = self.data_obj.mt_dict[key].grid_north/self.dscale
dpt = self.data_obj.mt_dict[key].pt
data_pt_arr[:, ii]['east'] = east
data_pt_arr[:, ii]['north'] = north
data_pt_arr[:, ii]['phimin'] = dpt.phimin[0]
data_pt_arr[:, ii]['phimax'] = dpt.phimax[0]
data_pt_arr[:, ii]['azimuth'] = dpt.azimuth[0]
data_pt_arr[:, ii]['skew'] = dpt.beta[0]
if self.resp_fn is not None:
mpt = self.resp_obj.mt_dict[key].pt
try:
rpt = mtpt.ResidualPhaseTensor(pt_object1=dpt,
pt_object2=mpt)
rpt = rpt.residual_pt
res_pt_arr[:, ii]['east'] = east
res_pt_arr[:, ii]['north'] = north
res_pt_arr[:, ii]['phimin'] = rpt.phimin[0]
res_pt_arr[:, ii]['phimax'] = rpt.phimax[0]
res_pt_arr[:, ii]['azimuth'] = rpt.azimuth[0]
res_pt_arr[:, ii]['skew'] = rpt.beta[0]
res_pt_arr[:, ii]['geometric_mean'] = np.sqrt(abs(rpt.phimin[0]*\
rpt.phimax[0]))
except mtex.MTpyError_PT:
print key, dpt.pt.shape, mpt.pt.shape
model_pt_arr[:, ii]['east'] = east
model_pt_arr[:, ii]['north'] = north
model_pt_arr[:, ii]['phimin'] = mpt.phimin[0]
model_pt_arr[:, ii]['phimax'] = mpt.phimax[0]
model_pt_arr[:, ii]['azimuth'] = mpt.azimuth[0]
model_pt_arr[:, ii]['skew'] = mpt.beta[0]
#make these attributes
self.pt_data_arr = data_pt_arr
if self.resp_fn is not None:
self.pt_resp_arr = model_pt_arr
self.pt_resid_arr = res_pt_arr
def plot(self):
"""
        Plot phase tensor maps for the data and/or model response; each figure
        corresponds to a different period. If a response is input, a third column
        is added showing the residual phase tensor, which highlights where the
        model does not fit the data well. Locations are plotted in map_scale
        units (km by default).
"""
#--> read in data first
if self.data_obj is None:
self._read_files()
# set plot properties
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = self.subplot_left
plt.rcParams['figure.subplot.right'] = self.subplot_right
plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
plt.rcParams['figure.subplot.top'] = self.subplot_top
font_dict = {'size':self.font_size+2, 'weight':'bold'}
# make a grid of subplots
gs = gridspec.GridSpec(1, 3, hspace=self.subplot_hspace,
wspace=self.subplot_wspace)
#set some parameters for the colorbar
ckmin = float(self.ellipse_range[0])
ckmax = float(self.ellipse_range[1])
try:
ckstep = float(self.ellipse_range[2])
except IndexError:
if self.ellipse_cmap == 'mt_seg_bl2wh2rd':
raise ValueError('Need to input range as (min, max, step)')
else:
ckstep = 3
bounds = np.arange(ckmin, ckmax+ckstep, ckstep)
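        # bounds gives the color segment boundaries and is only used when a
        # segmented colormap (e.g. 'mt_seg_bl2wh2rd') is selected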
# set plot limits to be the station area
if self.ew_limits == None:
east_min = self.data_obj.data_array['rel_east'].min()-\
self.pad_east
east_max = self.data_obj.data_array['rel_east'].max()+\
self.pad_east
self.ew_limits = (east_min/self.dscale, east_max/self.dscale)
if self.ns_limits == None:
north_min = self.data_obj.data_array['rel_north'].min()-\
self.pad_north
north_max = self.data_obj.data_array['rel_north'].max()+\
self.pad_north
self.ns_limits = (north_min/self.dscale, north_max/self.dscale)
#-------------plot phase tensors------------------------------------
#for ff, per in enumerate(self.plot_period_list):
for ff, per in enumerate(self.plot_period_list[:1]):
#FZ
print(ff,per)
print(self.plot_period_list)
data_ii = self.period_dict[per]
print 'Plotting Period: {0:.5g}'.format(per)
fig = plt.figure('{0:.5g}'.format(per), figsize=self.fig_size,
dpi=self.fig_dpi)
fig.clf()
if self.resp_fn is not None:
axd = fig.add_subplot(gs[0, 0], aspect='equal')
axm = fig.add_subplot(gs[0, 1], aspect='equal')
axr = fig.add_subplot(gs[0, 2], aspect='equal')
ax_list = [axd, axm, axr]
else:
axd = fig.add_subplot(gs[0, :], aspect='equal')
ax_list = [axd]
#plot model below the phase tensors
if self.model_fn is not None:
approx_depth, d_index = ws.estimate_skin_depth(self.model_obj.res_model.copy(),
self.model_obj.grid_z.copy()/self.dscale,
per,
dscale=self.dscale)
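                # estimate_skin_depth returns an approximate penetration depth for
                # this period and the matching depth index, which is used to pick
                # the resistivity slice plotted beneath the ellipses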
#need to add an extra row and column to east and north to make sure
#all is plotted see pcolor for details.
plot_east = np.append(self.model_obj.grid_east,
self.model_obj.grid_east[-1]*1.25)/\
self.dscale
plot_north = np.append(self.model_obj.grid_north,
self.model_obj.grid_north[-1]*1.25)/\
self.dscale
#make a mesh grid for plotting
#the 'ij' makes sure the resulting grid is in east, north
self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
plot_north,
indexing='ij')
for ax in ax_list:
plot_res = np.log10(self.model_obj.res_model[:, :, d_index].T)
ax.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.res_cmap,
vmin=self.res_limits[0],
vmax=self.res_limits[1])
#--> plot data phase tensors
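            # ellipse axes are normalized by the maximum phimax at this period and
            # scaled by ellipse_size; the rotation 90-azimuth plots azimuth
            # measured clockwise from north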
for pt in self.pt_data_arr[data_ii]:
eheight = pt['phimin']/\
self.pt_data_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ewidth = pt['phimax']/\
self.pt_data_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ellipse = Ellipse((pt['east'],
pt['north']),
width=ewidth,
height=eheight,
angle=90-pt['azimuth'])
#get ellipse color
if self.ellipse_cmap.find('seg')>0:
ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax))
axd.add_artist(ellipse)
#-----------plot response phase tensors---------------
if self.resp_fn is not None:
rcmin = np.floor(self.pt_resid_arr['geometric_mean'].min())
rcmax = np.floor(self.pt_resid_arr['geometric_mean'].max())
for mpt, rpt in zip(self.pt_resp_arr[data_ii],
self.pt_resid_arr[data_ii]):
eheight = mpt['phimin']/\
self.pt_resp_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ewidth = mpt['phimax']/\
self.pt_resp_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ellipsem = Ellipse((mpt['east'],
mpt['north']),
width=ewidth,
height=eheight,
angle=90-mpt['azimuth'])
#get ellipse color
if self.ellipse_cmap.find('seg')>0:
ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax))
axm.add_artist(ellipsem)
#-----------plot residual phase tensors---------------
eheight = rpt['phimin']/\
self.pt_resid_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ewidth = rpt['phimax']/\
self.pt_resid_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ellipser = Ellipse((rpt['east'],
rpt['north']),
width=ewidth,
height=eheight,
angle=rpt['azimuth'])
#get ellipse color
rpt_color = np.sqrt(abs(rpt['phimin']*rpt['phimax']))
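                    # residual ellipses are colored by the geometric mean
                    # sqrt(|phimin * phimax|) of the residual phase tensor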
if self.ellipse_cmap.find('seg')>0:
ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
'geometric_mean',
self.residual_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
'geometric_mean',
self.residual_cmap,
ckmin,
ckmax))
axr.add_artist(ellipser)
#--> set axes properties
# data
axd.set_xlim(self.ew_limits)
axd.set_ylim(self.ns_limits)
axd.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=font_dict)
axd.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=font_dict)
#make a colorbar for phase tensors
#bb = axd.axes.get_position().bounds
bb = axd.get_position().bounds
y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_location = (3.35*bb[2]/5+bb[0],
y1*self.cb_pt_pad, .295*bb[2], .02)
cbaxd = fig.add_axes(cb_location)
cbd = mcb.ColorbarBase(cbaxd,
cmap=mtcl.cmapdict[self.ellipse_cmap],
norm=Normalize(vmin=ckmin,
vmax=ckmax),
orientation='horizontal')
cbd.ax.xaxis.set_label_position('top')
cbd.ax.xaxis.set_label_coords(.5, 1.75)
cbd.set_label(mtplottools.ckdict[self.ellipse_colorby])
cbd.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
self.cb_tick_step))
axd.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Data',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
#Model and residual
if self.resp_fn is not None:
for aa, ax in enumerate([axm, axr]):
ax.set_xlim(self.ew_limits)
ax.set_ylim(self.ns_limits)
ax.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=font_dict)
plt.setp(ax.yaxis.get_ticklabels(), visible=False)
#make a colorbar ontop of axis
bb = ax.axes.get_position().bounds
y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_location = (3.35*bb[2]/5+bb[0],
y1*self.cb_pt_pad, .295*bb[2], .02)
cbax = fig.add_axes(cb_location)
if aa == 0:
cb = mcb.ColorbarBase(cbax,
cmap=mtcl.cmapdict[self.ellipse_cmap],
norm=Normalize(vmin=ckmin,
vmax=ckmax),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.75)
cb.set_label(mtplottools.ckdict[self.ellipse_colorby])
cb.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
self.cb_tick_step))
ax.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Model',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
else:
cb = mcb.ColorbarBase(cbax,
cmap=mtcl.cmapdict[self.residual_cmap],
norm=Normalize(vmin=rcmin,
vmax=rcmax),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.75)
cb.set_label(r"$\sqrt{\Phi_{min} \Phi_{max}}$")
cb_ticks = [rcmin, (rcmax-rcmin)/2, rcmax]
cb.set_ticks(cb_ticks)
ax.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Residual',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
if self.model_fn is not None:
for ax in ax_list:
ax.tick_params(direction='out')
bb = ax.axes.get_position().bounds
y1 = .25*(2-(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_position = (3.0*bb[2]/5+bb[0],
y1*self.cb_res_pad, .35*bb[2], .02)
cbax = fig.add_axes(cb_position)
cb = mcb.ColorbarBase(cbax,
cmap=self.res_cmap,
norm=Normalize(vmin=self.res_limits[0],
vmax=self.res_limits[1]),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.5)
cb.set_label('Resistivity ($\Omega \cdot$m)')
cb_ticks = np.arange(np.floor(self.res_limits[0]),
np.ceil(self.res_limits[1]+1), 1)
cb.set_ticks(cb_ticks)
cb.set_ticklabels([mtplottools.labeldict[ctk] for ctk in cb_ticks])
plt.show()
self.fig_list.append(fig)
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_path=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
                      * full path -> file will be saved to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
        **close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_path) == False:
try:
os.mkdir(save_path)
except:
raise IOError('Need to input a correct directory path')
for fig in self.fig_list:
per = fig.canvas.get_window_title()
save_fn = os.path.join(save_path, 'PT_DepthSlice_{0}s.{1}'.format(
per, file_format))
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
#==============================================================================
# plot depth slices
#==============================================================================
class moved_PlotDepthSlice(object):
"""
Plots depth slices of resistivity model
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
>>> # plot just first layer to check the formating
>>> pds = ws.PlotDepthSlice(model_fn=mfn, station_fn=sfn,
>>> ... depth_index=0, save_plots='n')
>>> #move color bar up
>>> pds.cb_location
>>> (0.64500000000000002, 0.14999999999999997, 0.3, 0.025)
>>> pds.cb_location = (.645, .175, .3, .025)
>>> pds.redraw_plot()
>>> #looks good now plot all depth slices and save them to a folder
>>> pds.save_path = r"/home/MT/ws3dinv/Inv1/DepthSlices"
>>> pds.depth_index = None
>>> pds.save_plots = 'y'
>>> pds.redraw_plot()
======================= ===================================================
Attributes Description
======================= ===================================================
cb_location location of color bar (x, y, width, height)
*default* is None, automatically locates
cb_orientation [ 'vertical' | 'horizontal' ]
*default* is horizontal
cb_pad padding between axes and colorbar
*default* is None
cb_shrink percentage to shrink colorbar by
*default* is None
climits (min, max) of resistivity color on log scale
*default* is (0, 4)
cmap name of color map *default* is 'jet_r'
data_fn full path to data file
depth_index integer value of depth slice index, shallowest
layer is 0
dscale scaling parameter depending on map_scale
ew_limits (min, max) plot limits in e-w direction in
map_scale units. *default* is None, sets viewing
area to the station area
fig_aspect aspect ratio of plot. *default* is 1
fig_dpi resolution of figure in dots-per-inch. *default* is
300
fig_list list of matplotlib.figure instances for each
depth slice
fig_size [width, height] in inches of figure size
*default* is [6, 6]
font_size size of ticklabel font in points, labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
initial_fn full path to initial file
map_scale [ 'km' | 'm' ] distance units of map. *default* is
km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
    nodes_east              relative distance between nodes in e-w direction
                            in map_scale units
    nodes_north             relative distance between nodes in n-s direction
                            in map_scale units
    nodes_z                 relative distance between nodes in z direction
                            in map_scale units
ns_limits (min, max) plot limits in n-s direction in
map_scale units. *default* is None, sets viewing
area to the station area
plot_grid [ 'y' | 'n' ] 'y' to plot mesh grid lines.
*default* is 'n'
plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
save_path path to save figures to
save_plots [ 'y' | 'n' ] 'y' to save depth slices to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
    title                   title of plot *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
======================= ===================================================
"""
def __init__(self, model_fn=None, data_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.save_path = kwargs.pop('save_path', None)
if self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
elif self.initial_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.initial_fn)
if self.save_path is not None:
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.save_plots = kwargs.pop('save_plots', 'y')
self.depth_index = kwargs.pop('depth_index', None)
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.plot_grid = kwargs.pop('plot_grid', 'n')
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.fig_list = []
self.xminorticks = kwargs.pop('xminorticks', 1000)
self.yminorticks = kwargs.pop('yminorticks', 1000)
self.climits = kwargs.pop('climits', (0,4))
self.cmap = kwargs.pop('cmap', 'jet_r')
self.font_size = kwargs.pop('font_size', 8)
self.cb_shrink = kwargs.pop('cb_shrink', .8)
self.cb_pad = kwargs.pop('cb_pad', .01)
self.cb_orientation = kwargs.pop('cb_orientation', 'horizontal')
self.cb_location = kwargs.pop('cb_location', None)
self.subplot_right = .99
self.subplot_left = .085
self.subplot_top = .92
self.subplot_bottom = .1
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
md_model = Model()
md_model.read_model_file(self.model_fn)
self.res_model = md_model.res_model
self.grid_east = md_model.grid_east/self.dscale
self.grid_north = md_model.grid_north/self.dscale
self.grid_z = md_model.grid_z/self.dscale
self.nodes_east = md_model.nodes_east/self.dscale
self.nodes_north = md_model.nodes_north/self.dscale
self.nodes_z = md_model.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
md_data = Data()
md_data.read_data_file(self.data_fn)
self.station_east = md_data.station_locations['rel_east']/self.dscale
self.station_north = md_data.station_locations['rel_north']/self.dscale
self.station_names = md_data.station_locations['station']
else:
print 'Could not find data file {0}'.format(self.data_fn)
def plot(self):
"""
plot depth slices
"""
#--> get information from files
self.read_files()
fdict = {'size':self.font_size+2, 'weight':'bold'}
        cblabeldict={-2:'$10^{-2}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
        #create a list of depth slices to plot
if self.depth_index == None:
zrange = range(self.grid_z.shape[0])
elif type(self.depth_index) is int:
zrange = [self.depth_index]
elif type(self.depth_index) is list or \
type(self.depth_index) is np.ndarray:
zrange = self.depth_index
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
xlimits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
xlimits = (self.grid_east[5], self.grid_east[-5])
else:
xlimits = self.ew_limits
if self.ns_limits == None:
if self.station_north is not None:
ylimits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
ylimits = (self.grid_north[5], self.grid_north[-5])
else:
ylimits = self.ns_limits
#make a mesh grid of north and east
self.mesh_east, self.mesh_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
plt.rcParams['font.size'] = self.font_size
#--> plot depths into individual figures
for ii in zrange:
depth = '{0:.3f} ({1})'.format(self.grid_z[ii],
self.map_scale)
fig = plt.figure(depth, figsize=self.fig_size, dpi=self.fig_dpi)
plt.clf()
ax1 = fig.add_subplot(1, 1, 1, aspect=self.fig_aspect)
plot_res = np.log10(self.res_model[:, :, ii].T)
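            # resistivity is plotted as log10 and transposed so that east is the
            # x axis and north is the y axis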
mesh_plot = ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
ax1.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
#set axis properties
ax1.set_xlim(xlimits)
ax1.set_ylim(ylimits)
ax1.xaxis.set_minor_locator(MultipleLocator(self.xminorticks/self.dscale))
ax1.yaxis.set_minor_locator(MultipleLocator(self.yminorticks/self.dscale))
ax1.set_ylabel('Northing ('+self.map_scale+')',fontdict=fdict)
ax1.set_xlabel('Easting ('+self.map_scale+')',fontdict=fdict)
ax1.set_title('Depth = {0}'.format(depth), fontdict=fdict)
#plot the grid if desired
if self.plot_grid == 'y':
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
east_line_ylist.append(None)
ax1.plot(east_line_xlist,
east_line_ylist,
lw=.25,
color='k')
north_line_xlist = []
north_line_ylist = []
for yy in self.grid_north:
north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
north_line_xlist.append(None)
north_line_ylist.extend([yy, yy])
north_line_ylist.append(None)
ax1.plot(north_line_xlist,
north_line_ylist,
lw=.25,
color='k')
#plot the colorbar
if self.cb_location is None:
if self.cb_orientation == 'horizontal':
self.cb_location = (ax1.axes.figbox.bounds[3]-.225,
ax1.axes.figbox.bounds[1]+.05,.3,.025)
elif self.cb_orientation == 'vertical':
self.cb_location = ((ax1.axes.figbox.bounds[2]-.15,
ax1.axes.figbox.bounds[3]-.21,.025,.3))
ax2 = fig.add_axes(self.cb_location)
cb = mcb.ColorbarBase(ax2,
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]),
orientation=self.cb_orientation)
if self.cb_orientation == 'horizontal':
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5,1.3)
elif self.cb_orientation == 'vertical':
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(self.climits[0],self.climits[1]+1))
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(self.climits[0],
self.climits[1]+1)])
self.fig_list.append(fig)
#--> save plots to a common folder
if self.save_plots == 'y':
fig.savefig(os.path.join(self.save_path,
"Depth_{}_{:.4f}.png".format(ii, self.grid_z[ii])),
dpi=self.fig_dpi, bbox_inches='tight')
fig.clear()
plt.close()
else:
pass
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def update_plot(self, fig):
"""
update any parameters that where changed using the built-in draw from
canvas.
        Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots depth slices of model from WS3DINV")
#==============================================================================
# plot slices
#==============================================================================
class PlotSlices(object):
"""
plot all slices and be able to scroll through the model
:Example: ::
>>> import mtpy.modeling.modem_new as modem
>>> mfn = r"/home/modem/Inv1/Modular_NLCG_100.rho"
>>> dfn = r"/home/modem/Inv1/ModEM_data.dat"
>>> pds = ws.PlotSlices(model_fn=mfn, data_fn=dfn)
======================= ===================================================
Buttons Description
======================= ===================================================
'e' moves n-s slice east by one model block
'w' moves n-s slice west by one model block
'n' moves e-w slice north by one model block
'm' moves e-w slice south by one model block
'd' moves depth slice down by one model block
'u' moves depth slice up by one model block
======================= ===================================================
======================= ===================================================
Attributes Description
======================= ===================================================
ax_en matplotlib.axes instance for depth slice map view
ax_ez matplotlib.axes instance for e-w slice
ax_map matplotlib.axes instance for location map
ax_nz matplotlib.axes instance for n-s slice
climits (min , max) color limits on resistivity in log
scale. *default* is (0, 4)
    cmap                    name of color map for resistivity.
*default* is 'jet_r'
data_fn full path to data file name
dscale scaling parameter depending on map_scale
east_line_xlist list of line nodes of east grid for faster plotting
east_line_ylist list of line nodes of east grid for faster plotting
ew_limits (min, max) limits of e-w in map_scale units
*default* is None and scales to station area
fig matplotlib.figure instance for figure
fig_aspect aspect ratio of plots. *default* is 1
fig_dpi resolution of figure in dots-per-inch
*default* is 300
fig_num figure instance number
fig_size [width, height] of figure window.
*default* is [6,6]
font_dict dictionary of font keywords, internally created
    font_size               size of ticklabels in points, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
index_east index value of grid_east being plotted
index_north index value of grid_north being plotted
index_vertical index value of grid_z being plotted
initial_fn full path to initial file
key_press matplotlib.canvas.connect instance
map_scale [ 'm' | 'km' ] scale of map. *default* is km
mesh_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_north np.meshgrid(grid_east, grid_north)[1]
mesh_ez_east np.meshgrid(grid_east, grid_z)[0]
mesh_ez_vertical np.meshgrid(grid_east, grid_z)[1]
mesh_north np.meshgrid(grid_east, grid_north)[1]
mesh_nz_north np.meshgrid(grid_north, grid_z)[0]
mesh_nz_vertical np.meshgrid(grid_north, grid_z)[1]
model_fn full path to model file
ms size of station markers in points. *default* is 2
    nodes_east              relative distance between nodes in e-w direction
                            in map_scale units
    nodes_north             relative distance between nodes in n-s direction
                            in map_scale units
    nodes_z                 relative distance between nodes in z direction
                            in map_scale units
north_line_xlist list of line nodes north grid for faster plotting
north_line_ylist list of line nodes north grid for faster plotting
ns_limits (min, max) limits of plots in n-s direction
                            *default* is None, set viewing area to station area
plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
station_color color of station marker. *default* is black
station_dict_east location of stations for each east grid row
station_dict_north location of stations for each north grid row
station_east location of stations in east direction
station_fn full path to station file
station_font_color color of station label
station_font_pad padding between station marker and label
station_font_rotation angle of station label
station_font_size font size of station label
station_font_weight weight of font for station label
station_id [min, max] index values for station labels
station_marker station marker
station_names name of stations
station_north location of stations in north direction
subplot_bottom distance between axes and bottom of figure window
subplot_hspace distance between subplots in vertical direction
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
subplot_wspace distance between subplots in horizontal direction
title title of plot
z_limits (min, max) limits in vertical direction,
======================= ===================================================
"""
def __init__(self, model_fn, data_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.font_size = kwargs.pop('font_size', 7)
self.subplot_wspace = .20
self.subplot_hspace = .30
self.subplot_right = .98
self.subplot_left = .08
self.subplot_top = .97
self.subplot_bottom = .1
self.index_vertical = kwargs.pop('index_vertical', 0)
self.index_east = kwargs.pop('index_east', 0)
self.index_north = kwargs.pop('index_north', 0)
self.cmap = kwargs.pop('cmap', 'jet_r')
self.climits = kwargs.pop('climits', (0, 4))
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.z_limits = kwargs.pop('z_limits', None)
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.station_id = kwargs.pop('station_id', None)
self.station_font_size = kwargs.pop('station_font_size', 8)
self.station_font_pad = kwargs.pop('station_font_pad', 1.0)
self.station_font_weight = kwargs.pop('station_font_weight', 'bold')
self.station_font_rotation = kwargs.pop('station_font_rotation', 60)
self.station_font_color = kwargs.pop('station_font_color', 'k')
self.station_marker = kwargs.pop('station_marker',
r"$\blacktriangledown$")
self.station_color = kwargs.pop('station_color', 'k')
self.ms = kwargs.pop('ms', 10)
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
md_model = Model()
md_model.read_model_file(self.model_fn)
self.res_model = md_model.res_model
self.grid_east = md_model.grid_east/self.dscale
self.grid_north = md_model.grid_north/self.dscale
self.grid_z = md_model.grid_z/self.dscale
self.nodes_east = md_model.nodes_east/self.dscale
self.nodes_north = md_model.nodes_north/self.dscale
self.nodes_z = md_model.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
md_data = Data()
md_data.read_data_file(self.data_fn)
self.station_east = md_data.station_locations['rel_east']/self.dscale
self.station_north = md_data.station_locations['rel_north']/self.dscale
self.station_names = md_data.station_locations['station']
else:
print 'Could not find data file {0}'.format(self.data_fn)
def plot(self):
"""
plot:
east vs. vertical,
north vs. vertical,
east vs. north
"""
self.read_files()
self.get_station_grid_locations()
print "=============== ==============================================="
print " Buttons Description "
print "=============== ==============================================="
print " 'e' moves n-s slice east by one model block"
print " 'w' moves n-s slice west by one model block"
print " 'n' moves e-w slice north by one model block"
print " 'm' moves e-w slice south by one model block"
print " 'd' moves depth slice down by one model block"
print " 'u' moves depth slice up by one model block"
print "=============== ==============================================="
self.font_dict = {'size':self.font_size+2, 'weight':'bold'}
#--> set default font size
plt.rcParams['font.size'] = self.font_size
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
self.ew_limits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
self.ew_limits = (self.grid_east[5], self.grid_east[-5])
if self.ns_limits == None:
if self.station_north is not None:
self.ns_limits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
self.ns_limits = (self.grid_north[5], self.grid_north[-5])
if self.z_limits == None:
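            # default depth range: the top sits ~5 km above zero depth (presumably
            # so surface station markers stay visible) and the bottom extends to
            # roughly the larger of the two horizontal extents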
depth_limit = max([(abs(self.ew_limits[0])+abs(self.ew_limits[1])),
(abs(self.ns_limits[0])+abs(self.ns_limits[1]))])
self.z_limits = (-5000/self.dscale, depth_limit)
self.fig = plt.figure(self.fig_num, figsize=self.fig_size,
dpi=self.fig_dpi)
plt.clf()
gs = gridspec.GridSpec(2, 2,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace)
#make subplots
self.ax_ez = self.fig.add_subplot(gs[0, 0], aspect=self.fig_aspect)
self.ax_nz = self.fig.add_subplot(gs[1, 1], aspect=self.fig_aspect)
self.ax_en = self.fig.add_subplot(gs[1, 0], aspect=self.fig_aspect)
self.ax_map = self.fig.add_subplot(gs[0, 1])
#make grid meshes being sure the indexing is correct
self.mesh_ez_east, self.mesh_ez_vertical = np.meshgrid(self.grid_east,
self.grid_z,
indexing='ij')
self.mesh_nz_north, self.mesh_nz_vertical = np.meshgrid(self.grid_north,
self.grid_z,
indexing='ij')
self.mesh_en_east, self.mesh_en_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
#--> plot east vs vertical
self._update_ax_ez()
#--> plot north vs vertical
self._update_ax_nz()
#--> plot east vs north
self._update_ax_en()
#--> plot the grid as a map view
self._update_map()
#plot color bar
cbx = mcb.make_axes(self.ax_map, fraction=.15, shrink=.75, pad = .15)
cb = mcb.ColorbarBase(cbx[0],
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]))
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1)))
        cblabeldict={-2:'$10^{-2}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1))])
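        # tick labels are written as powers of ten because the model resistivities
        # are plotted on a log10 scale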
plt.show()
self.key_press = self.fig.canvas.mpl_connect('key_press_event',
self.on_key_press)
def on_key_press(self, event):
"""
on a key press change the slices
"""
key_press = event.key
if key_press == 'n':
if self.index_north == self.grid_north.shape[0]:
print 'Already at northern most grid cell'
else:
self.index_north += 1
if self.index_north > self.grid_north.shape[0]:
self.index_north = self.grid_north.shape[0]
self._update_ax_ez()
self._update_map()
if key_press == 'm':
if self.index_north == 0:
print 'Already at southern most grid cell'
else:
self.index_north -= 1
if self.index_north < 0:
self.index_north = 0
self._update_ax_ez()
self._update_map()
if key_press == 'e':
if self.index_east == self.grid_east.shape[0]:
print 'Already at eastern most grid cell'
else:
self.index_east += 1
if self.index_east > self.grid_east.shape[0]:
self.index_east = self.grid_east.shape[0]
self._update_ax_nz()
self._update_map()
if key_press == 'w':
if self.index_east == 0:
print 'Already at western most grid cell'
else:
self.index_east -= 1
if self.index_east < 0:
self.index_east = 0
self._update_ax_nz()
self._update_map()
if key_press == 'd':
if self.index_vertical == self.grid_z.shape[0]:
print 'Already at deepest grid cell'
else:
self.index_vertical += 1
if self.index_vertical > self.grid_z.shape[0]:
self.index_vertical = self.grid_z.shape[0]
self._update_ax_en()
print 'Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
if key_press == 'u':
if self.index_vertical == 0:
print 'Already at surface grid cell'
else:
self.index_vertical -= 1
if self.index_vertical < 0:
self.index_vertical = 0
self._update_ax_en()
                print 'Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
def _update_ax_ez(self):
"""
update east vs vertical plot
"""
self.ax_ez.cla()
plot_ez = np.log10(self.res_model[self.index_north, :, :])
self.ax_ez.pcolormesh(self.mesh_ez_east,
self.mesh_ez_vertical,
plot_ez,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sx in self.station_dict_north[self.grid_north[self.index_north]]:
self.ax_ez.text(sx,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_ez.set_xlim(self.ew_limits)
self.ax_ez.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_ez.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_ez.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_nz(self):
"""
update east vs vertical plot
"""
self.ax_nz.cla()
plot_nz = np.log10(self.res_model[:, self.index_east, :])
self.ax_nz.pcolormesh(self.mesh_nz_north,
self.mesh_nz_vertical,
plot_nz,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sy in self.station_dict_east[self.grid_east[self.index_east]]:
self.ax_nz.text(sy,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_nz.set_xlim(self.ns_limits)
self.ax_nz.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_nz.set_xlabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_nz.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_en(self):
"""
update east vs vertical plot
"""
self.ax_en.cla()
plot_en = np.log10(self.res_model[:, :, self.index_vertical].T)
self.ax_en.pcolormesh(self.mesh_en_east,
self.mesh_en_north,
plot_en,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
self.ax_en.set_xlim(self.ew_limits)
self.ax_en.set_ylim(self.ns_limits)
self.ax_en.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_en.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_en.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.fig.canvas.draw()
self._update_map()
def _update_map(self):
self.ax_map.cla()
self.east_line_xlist = []
self.east_line_ylist = []
for xx in self.grid_east:
self.east_line_xlist.extend([xx, xx])
self.east_line_xlist.append(None)
self.east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
self.east_line_ylist.append(None)
self.ax_map.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.north_line_xlist = []
self.north_line_ylist = []
for yy in self.grid_north:
self.north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
self.north_line_xlist.append(None)
self.north_line_ylist.extend([yy, yy])
self.north_line_ylist.append(None)
self.ax_map.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#--> e-w indication line
self.ax_map.plot([self.grid_east.min(),
self.grid_east.max()],
[self.grid_north[self.index_north+1],
self.grid_north[self.index_north+1]],
lw=1,
color='g')
#--> e-w indication line
self.ax_map.plot([self.grid_east[self.index_east+1],
self.grid_east[self.index_east+1]],
[self.grid_north.min(),
self.grid_north.max()],
lw=1,
color='b')
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_map.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.ax_map.set_xlim(self.ew_limits)
self.ax_map.set_ylim(self.ns_limits)
self.ax_map.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_map.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#plot stations
self.ax_map.text(self.ew_limits[0]*.95, self.ns_limits[1]*.95,
'{0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale),
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor': 'white'},
fontdict=self.font_dict)
self.fig.canvas.draw()
def get_station_grid_locations(self):
"""
get the grid line on which a station resides for plotting
"""
self.station_dict_east = dict([(gx, []) for gx in self.grid_east])
self.station_dict_north = dict([(gy, []) for gy in self.grid_north])
if self.station_east is not None:
for ss, sx in enumerate(self.station_east):
gx = np.where(self.grid_east <= sx)[0][-1]
self.station_dict_east[self.grid_east[gx]].append(self.station_north[ss])
for ss, sy in enumerate(self.station_north):
gy = np.where(self.grid_north <= sy)[0][-1]
self.station_dict_north[self.grid_north[gy]].append(self.station_east[ss])
else:
return
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
plt.close(self.fig)
self.plot()
def save_figure(self, save_fn=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
                              *default* is landscape
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
            **close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
        if fig_dpi is None:
            fig_dpi = self.fig_dpi
        if not os.path.isdir(save_fn):
            file_format = save_fn[-3:]
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_E{0}_N{1}_Z{2}.{3}'.format(
self.index_east, self.index_north,
self.index_vertical, file_format))
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
#==============================================================================
# plot rms maps
#==============================================================================
class moved_Plot_RMS_Maps(object):
"""
plots the RMS as (data-model)/(error) in map view for all components
    of the data file. Gets this information from the .res file output
by ModEM.
Arguments:
------------------
**residual_fn** : string
full path to .res file
=================== =======================================================
Attributes Description
=================== =======================================================
fig matplotlib.figure instance for a single plot
fig_dpi dots-per-inch resolution of figure *default* is 200
fig_num number of fig instance *default* is 1
fig_size size of figure in inches [width, height]
*default* is [7,6]
font_size font size of tick labels, axis labels are +2
*default* is 8
marker marker style for station rms,
see matplotlib.line for options,
*default* is 's' --> square
marker_size size of marker in points. *default* is 10
pad_x padding in map units from edge of the axis to stations
                        at the extremities in longitude.
*default* is 1/2 tick_locator
pad_y padding in map units from edge of the axis to stations
                        at the extremities in latitude.
*default* is 1/2 tick_locator
period_index index of the period you want to plot according to
                        self.residual.period_list. *default* is 0
plot_yn [ 'y' | 'n' ] default is 'y' to plot on instantiation
plot_z_list internal variable for plotting
residual modem.Data instance that holds all the information
from the residual_fn given
residual_fn full path to .res file
rms_cmap matplotlib.cm object for coloring the markers
rms_cmap_dict dictionary of color values for rms_cmap
rms_max maximum rms to plot. *default* is 5.0
rms_min minimum rms to plot. *default* is 1.0
save_path path to save figures to. *default* is directory of
residual_fn
subplot_bottom spacing from axis to bottom of figure canvas.
*default* is .1
subplot_hspace horizontal spacing between subplots.
*default* is .1
subplot_left spacing from axis to left of figure canvas.
*default* is .1
subplot_right spacing from axis to right of figure canvas.
*default* is .9
subplot_top spacing from axis to top of figure canvas.
*default* is .95
subplot_vspace vertical spacing between subplots.
*default* is .01
tick_locator increment for x and y major ticks. *default* is
limits/5
=================== =======================================================
=================== =======================================================
Methods Description
=================== =======================================================
plot plot rms maps for a single period
plot_loop loop over all frequencies and save figures to save_path
read_residual_fn read in residual_fn
    redraw_plot         after updating attributes, call redraw_plot to
                        redraw the figure
save_figure save the figure to a file
=================== =======================================================
:Example: ::
>>> import mtpy.modeling.modem_new as modem
>>> rms_plot = Plot_RMS_Maps(r"/home/ModEM/Inv1/mb_NLCG_030.res")
>>> # change some attributes
>>> rms_plot.fig_size = [6, 4]
>>> rms_plot.rms_max = 3
>>> rms_plot.redraw_plot()
>>> # happy with the look now loop over all periods
>>> rms_plot.plot_loop()
"""
def __init__(self, residual_fn, **kwargs):
self.residual_fn = residual_fn
self.residual = None
self.save_path = kwargs.pop('save_path', os.path.dirname(self.residual_fn))
self.period_index = kwargs.pop('period_index', 0)
self.subplot_left = kwargs.pop('subplot_left', .1)
self.subplot_right = kwargs.pop('subplot_right', .9)
self.subplot_top = kwargs.pop('subplot_top', .95)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
self.subplot_hspace = kwargs.pop('subplot_hspace', .1)
self.subplot_vspace = kwargs.pop('subplot_vspace', .01)
self.font_size = kwargs.pop('font_size', 8)
self.fig_size = kwargs.pop('fig_size', [7.75, 6.75])
self.fig_dpi = kwargs.pop('fig_dpi', 200)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig = None
self.marker = kwargs.pop('marker', 's')
self.marker_size = kwargs.pop('marker_size', 10)
self.rms_max = kwargs.pop('rms_max', 5)
self.rms_min = kwargs.pop('rms_min', 0)
self.tick_locator = kwargs.pop('tick_locator', None)
self.pad_x = kwargs.pop('pad_x', None)
self.pad_y = kwargs.pop('pad_y', None)
self.plot_yn = kwargs.pop('plot_yn', 'y')
# colormap for rms, goes white to black from 0 to rms max and
# red below 1 to show where the data is being over fit
self.rms_cmap_dict = {'red':((0.0, 1.0, 1.0),
(0.2, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'green':((0.0, 0.0, 0.0),
(0.2, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'blue':((0.0, 0.0, 0.0),
(0.2, 1.0, 1.0),
(1.0, 0.0, 0.0))}
self.rms_cmap = colors.LinearSegmentedColormap('rms_cmap',
self.rms_cmap_dict,
256)
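        # (sketch of how the segment dict is read: each channel is a sequence
        #  of (x, y0, y1) anchors on the normalized 0-1 colorbar axis; the
        #  anchor at x=0.2 corresponds to rms = 1 for the default rms_min=0,
        #  rms_max=5, below which the map shades toward red and above which
        #  white fades to black)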
self.plot_z_list = [{'label':r'$Z_{xx}$', 'index':(0, 0), 'plot_num':1},
{'label':r'$Z_{xy}$', 'index':(0, 1), 'plot_num':2},
{'label':r'$Z_{yx}$', 'index':(1, 0), 'plot_num':3},
{'label':r'$Z_{yy}$', 'index':(1, 1), 'plot_num':4},
{'label':r'$T_{x}$', 'index':(0, 0), 'plot_num':5},
{'label':r'$T_{y}$', 'index':(0, 1), 'plot_num':6}]
if self.plot_yn == 'y':
self.plot()
def read_residual_fn(self):
if self.residual is None:
self.residual = Data()
self.residual.read_data_file(self.residual_fn)
else:
pass
def plot(self):
"""
plot rms in map view
"""
self.read_residual_fn()
font_dict = {'size':self.font_size+2, 'weight':'bold'}
rms_1 = 1./self.rms_max
if self.tick_locator is None:
x_locator = np.round((self.residual.data_array['lon'].max()-
self.residual.data_array['lon'].min())/5, 2)
y_locator = np.round((self.residual.data_array['lat'].max()-
self.residual.data_array['lat'].min())/5, 2)
if x_locator > y_locator:
self.tick_locator = x_locator
            else:
self.tick_locator = y_locator
if self.pad_x is None:
self.pad_x = self.tick_locator/2
if self.pad_y is None:
self.pad_y = self.tick_locator/2
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = self.subplot_left
plt.rcParams['figure.subplot.right'] = self.subplot_right
plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
plt.rcParams['figure.subplot.top'] = self.subplot_top
plt.rcParams['figure.subplot.wspace'] = self.subplot_hspace
plt.rcParams['figure.subplot.hspace'] = self.subplot_vspace
self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
for p_dict in self.plot_z_list:
ax = self.fig.add_subplot(3, 2, p_dict['plot_num'], aspect='equal')
ii = p_dict['index'][0]
            jj = p_dict['index'][1]
for r_arr in self.residual.data_array:
                # calculate the rms as |residual| / error
if p_dict['plot_num'] < 5:
rms = r_arr['z'][self.period_index, ii, jj].__abs__()/\
(r_arr['z_err'][self.period_index, ii, jj].real)
else:
rms = r_arr['tip'][self.period_index, ii, jj].__abs__()/\
(r_arr['tip_err'][self.period_index, ii, jj].real)
#color appropriately
if np.nan_to_num(rms) == 0.0:
marker_color = (1, 1, 1)
marker = '.'
marker_size = .1
marker_edge_color = (1, 1, 1)
if rms > self.rms_max:
marker_color = (0, 0, 0)
marker = self.marker
marker_size = self.marker_size
marker_edge_color = (0, 0, 0)
elif rms >= 1 and rms <= self.rms_max:
r_color = 1-rms/self.rms_max+rms_1
marker_color = (r_color, r_color, r_color)
marker = self.marker
marker_size = self.marker_size
marker_edge_color = (0, 0, 0)
elif rms < 1:
r_color = 1-rms/self.rms_max
marker_color = (1, r_color, r_color)
marker = self.marker
marker_size = self.marker_size
marker_edge_color = (0, 0, 0)
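                # (summary of the scheme above: undefined rms -> tiny white
                #  dot, rms > rms_max -> black, 1 <= rms <= rms_max -> grey
                #  scale, rms < 1 -> reddish to flag over-fit stations)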
ax.plot(r_arr['lon'], r_arr['lat'],
marker=marker,
ms=marker_size,
mec=marker_edge_color,
mfc=marker_color,
zorder=3)
if p_dict['plot_num'] == 1 or p_dict['plot_num'] == 3:
ax.set_ylabel('Latitude (deg)', fontdict=font_dict)
plt.setp(ax.get_xticklabels(), visible=False)
elif p_dict['plot_num'] == 2 or p_dict['plot_num'] == 4:
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
elif p_dict['plot_num'] == 6:
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
else:
ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
ax.set_ylabel('Latitude (deg)', fontdict=font_dict)
ax.text(self.residual.data_array['lon'].min()+.005-self.pad_x,
self.residual.data_array['lat'].max()-.005+self.pad_y,
p_dict['label'],
verticalalignment='top',
horizontalalignment='left',
bbox={'facecolor':'white'},
zorder=3)
ax.tick_params(direction='out')
ax.grid(zorder=0, color=(.75, .75, .75))
#[line.set_zorder(3) for line in ax.lines]
ax.set_xlim(self.residual.data_array['lon'].min()-self.pad_x,
self.residual.data_array['lon'].max()+self.pad_x)
ax.set_ylim(self.residual.data_array['lat'].min()-self.pad_y,
self.residual.data_array['lat'].max()+self.pad_y)
ax.xaxis.set_major_locator(MultipleLocator(self.tick_locator))
ax.yaxis.set_major_locator(MultipleLocator(self.tick_locator))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
#cb_ax = mcb.make_axes(ax, orientation='vertical', fraction=.1)
cb_ax = self.fig.add_axes([self.subplot_right+.02, .225, .02, .45])
color_bar = mcb.ColorbarBase(cb_ax,
cmap=self.rms_cmap,
norm=colors.Normalize(vmin=self.rms_min,
vmax=self.rms_max),
orientation='vertical')
color_bar.set_label('RMS', fontdict=font_dict)
self.fig.suptitle('period = {0:.5g} (s)'.format(self.residual.period_list[self.period_index]),
fontdict={'size':self.font_size+3, 'weight':'bold'})
plt.show()
def redraw_plot(self):
plt.close('all')
self.plot()
def save_figure(self, save_path=None, save_fn_basename=None,
                    save_fig_dpi=None, fig_format='png', fig_close=True):
"""
save figure in the desired format
"""
if save_path is not None:
self.save_path = save_path
        if save_fn_basename is None:
save_fn_basename = '{0:02}_RMS_{1:.5g}_s.{2}'.format(self.period_index,
self.residual.period_list[self.period_index],
fig_format)
save_fn = os.path.join(self.save_path, save_fn_basename)
if save_fig_dpi is not None:
self.fig_dpi = save_fig_dpi
self.fig.savefig(save_fn, dpi=self.fig_dpi)
print 'saved file to {0}'.format(save_fn)
        if fig_close:
plt.close('all')
def plot_loop(self, fig_format='png'):
"""
loop over all periods and save figures accordingly
"""
self.read_residual_fn()
for f_index in range(self.residual.period_list.shape[0]):
self.period_index = f_index
self.plot()
self.save_figure(fig_format=fig_format)
#==============================================================================
# Exceptions
#==============================================================================
class ModEMError(Exception):
pass
| gpl-3.0 |
rgommers/scipy | scipy/stats/tests/test_morestats.py | 4 | 102952 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_, assert_allclose, assert_equal, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy import optimize
from scipy import stats
from scipy.stats.morestats import _abw_state
from .common_tests import check_named_results
from .._hypotests import _get_wilcoxon_distr
from scipy.stats._binomtest import _binary_search_for_binom_tst
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib # type: ignore[import]
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt # type: ignore[import]
have_matplotlib = True
except Exception:
have_matplotlib = False
# test data gear.dat from NIST for Levene and Bartlett test
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs:
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist:
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro:
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
shapiro_test = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
shapiro_test = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
assert_almost_equal(pw, 0.52460, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
# Verified against R
x3 = stats.norm.rvs(loc=5, scale=3, size=100, random_state=12345678)
w, pw = stats.shapiro(x3)
shapiro_test = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
shapiro_test = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(shapiro_test.statistic, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
assert_almost_equal(shapiro_test.pvalue, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
shapiro_test = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
shapiro_test = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
assert_almost_equal(pw, 0.52460, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, np.array([[], [2]], dtype=object))
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
shapiro_test = stats.shapiro(x)
assert_equal(w, np.nan)
assert_equal(shapiro_test.statistic, np.nan)
assert_almost_equal(pw, 1.0)
assert_almost_equal(shapiro_test.pvalue, 1.0)
class TestAnderson:
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
with np.errstate(all='ignore'):
A, crit, sig = stats.anderson(x2, 'expon')
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
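        # (this is the usual Anderson-Darling definition, roughly
        #  A^2 = -n - (1/n) * sum_i (2i-1)*[ln F(x_(i)) + ln(1 - F(x_(n+1-i)))],
        #  evaluated with the fitted gumbel_l parameters)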
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
# A constant array is a degenerate case and breaks gumbel_r.fit, so
# change one value in x2.
x2[0] = 0.996
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
class TestAndersonKSamp:
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0021, atol=0.00025)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0020, atol=0.00025)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_R_kSamples(self):
# test values generates with R package kSamples
# package version 1.2-6 (2017-06-14)
# r1 = 1:100
# continuous case (no ties) --> version 1
# res <- kSamples::ad.test(r1, r1 + 40.5)
# res$ad[1, "T.AD"] # 41.105
# res$ad[1, " asympt. P-value"] # 5.8399e-18
#
# discrete case (ties allowed) --> version 2 (here: midrank=True)
# res$ad[2, "T.AD"] # 41.235
#
# res <- kSamples::ad.test(r1, r1 + .5)
# res$ad[1, "T.AD"] # -1.2824
# res$ad[1, " asympt. P-value"] # 1
# res$ad[2, "T.AD"] # -1.2944
#
# res <- kSamples::ad.test(r1, r1 + 7.5)
# res$ad[1, "T.AD"] # 1.4923
# res$ad[1, " asympt. P-value"] # 0.077501
#
# res <- kSamples::ad.test(r1, r1 + 6)
# res$ad[2, "T.AD"] # 0.63892
# res$ad[2, " asympt. P-value"] # 0.17981
#
# res <- kSamples::ad.test(r1, r1 + 11.5)
# res$ad[1, "T.AD"] # 4.5042
# res$ad[1, " asympt. P-value"] # 0.00545
#
# res <- kSamples::ad.test(r1, r1 + 13.5)
# res$ad[1, "T.AD"] # 6.2982
# res$ad[1, " asympt. P-value"] # 0.00118
x1 = np.linspace(1, 100, 100)
        # test case: different distributions; p-value floored at 0.001
# test case for issue #5493 / #8536
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
assert_almost_equal(s, 41.105, 3)
assert_equal(p, 0.001)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
assert_almost_equal(s, 41.235, 3)
assert_equal(p, 0.001)
# test case: similar distributions --> p-value capped at 0.25
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
assert_almost_equal(s, -1.2824, 4)
assert_equal(p, 0.25)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5])
assert_almost_equal(s, -1.2944, 4)
assert_equal(p, 0.25)
# test case: check interpolated p-value in [0.01, 0.25] (no ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
assert_almost_equal(s, 1.4923, 4)
assert_allclose(p, 0.0775, atol=0.005, rtol=0)
# test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 6])
assert_almost_equal(s, 0.6389, 4)
assert_allclose(p, 0.1798, atol=0.005, rtol=0)
# test extended critical values for p=0.001 and p=0.005
s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
assert_almost_equal(s, 4.5042, 4)
assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
assert_almost_equal(s, 6.2982, 4)
assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
res = stats.anderson_ksamp((t1, t2), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari:
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_bad_alternative(self):
# invalid value for alternative must raise a ValueError
x1 = [1, 2, 3, 4]
x2 = [5, 6, 7, 8]
match = "'alternative' must be 'two-sided'"
with assert_raises(ValueError, match=match):
stats.ansari(x1, x2, alternative='foo')
def test_alternative_exact(self):
x1 = [-5, 1, 5, 10, 15, 20, 25] # high scale, loc=10
x2 = [7.5, 8.5, 9.5, 10.5, 11.5, 12.5] # low scale, loc=10
# ratio of scales is greater than 1. So, the
# p-value must be high when `alternative='less'`
# and low when `alternative='greater'`.
statistic, pval = stats.ansari(x1, x2)
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert pval_l > 0.95
assert pval_g < 0.05 # level of significance.
        # also check that the p-values sum up to 1 plus the probability
# mass under the calculated statistic.
prob = _abw_state.pmf(statistic, len(x1), len(x2))
assert_allclose(pval_g + pval_l, 1 + prob, atol=1e-12)
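        # (for a discrete null distribution P(W <= w) + P(W >= w) equals
        #  1 + P(W = w), so the two one-sided p-values overshoot 1 by exactly
        #  the pmf at the observed statistic)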
        # also check that one of the one-sided p-values equals half the
        # two-sided p-value and the other one-sided p-value is its
        # complement.
assert_allclose(pval_g, pval/2, atol=1e-12)
assert_allclose(pval_l, 1+prob-pval/2, atol=1e-12)
# sanity check. The result should flip if
# we exchange x and y.
pval_l_reverse = stats.ansari(x2, x1, alternative='less').pvalue
pval_g_reverse = stats.ansari(x2, x1, alternative='greater').pvalue
assert pval_l_reverse < 0.05
assert pval_g_reverse > 0.95
@pytest.mark.parametrize(
'x, y, alternative, expected',
        # the tests are designed so that every branch of the if-else
        # statement in the exact mode of the Ansari test is covered.
[([1, 2, 3, 4], [5, 6, 7, 8], 'less', 0.6285714285714),
([1, 2, 3, 4], [5, 6, 7, 8], 'greater', 0.6285714285714),
([1, 2, 3], [4, 5, 6, 7, 8], 'less', 0.8928571428571),
([1, 2, 3], [4, 5, 6, 7, 8], 'greater', 0.2857142857143),
([1, 2, 3, 4, 5], [6, 7, 8], 'less', 0.2857142857143),
([1, 2, 3, 4, 5], [6, 7, 8], 'greater', 0.8928571428571)]
)
def test_alternative_exact_with_R(self, x, y, alternative, expected):
# testing with R on arbitrary data
# Sample R code used for the third test case above:
# ```R
# > options(digits=16)
# > x <- c(1,2,3)
# > y <- c(4,5,6,7,8)
# > ansari.test(x, y, alternative='less', exact=TRUE)
#
# Ansari-Bradley test
#
# data: x and y
# AB = 6, p-value = 0.8928571428571
# alternative hypothesis: true ratio of scales is less than 1
#
# ```
pval = stats.ansari(x, y, alternative=alternative).pvalue
assert_allclose(pval, expected, atol=1e-12)
def test_alternative_approx(self):
# intuitive tests for approximation
x1 = stats.norm.rvs(0, 5, size=100, random_state=123)
x2 = stats.norm.rvs(0, 2, size=100, random_state=123)
# for m > 55 or n > 55, the test should automatically
# switch to approximation.
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert_allclose(pval_l, 1.0, atol=1e-12)
assert_allclose(pval_g, 0.0, atol=1e-12)
        # also check that one of the one-sided p-values equals half the
        # two-sided p-value and the other one-sided p-value is its
        # complement.
x1 = stats.norm.rvs(0, 2, size=60, random_state=123)
x2 = stats.norm.rvs(0, 1.5, size=60, random_state=123)
pval = stats.ansari(x1, x2).pvalue
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert_allclose(pval_g, pval/2, atol=1e-12)
assert_allclose(pval_l, 1-pval/2, atol=1e-12)
class TestBartlett:
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.bartlett, g1, x)
class TestLevene:
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.levene, g1, x)
class TestBinomP:
"""Tests for stats.binom_test."""
binom_test_func = staticmethod(stats.binom_test)
def test_data(self):
pval = self.binom_test_func(100, 250)
assert_almost_equal(pval, 0.0018833009350757682, 11)
pval = self.binom_test_func(201, 405)
assert_almost_equal(pval, 0.92085205962670713, 11)
pval = self.binom_test_func([682, 243], p=3/4)
assert_almost_equal(pval, 0.38249155957481695, 11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, self.binom_test_func, [1, 2, 3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, self.binom_test_func, [100])
# n less than x[0]
assert_raises(ValueError, self.binom_test_func, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError,
self.binom_test_func, [50, 50], p=2.0)
def test_alternatives(self):
res = self.binom_test_func(51, 235, p=1/6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = self.binom_test_func(51, 235, p=1/6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = self.binom_test_func(51, 235, p=1/6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestBinomTestP(TestBinomP):
"""
Tests for stats.binomtest as a replacement for stats.binom_test.
"""
@staticmethod
def binom_test_func(x, n=None, p=0.5, alternative='two-sided'):
        # This processing of x and n is copied from binom_test.
x = np.atleast_1d(x).astype(np.int_)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
result = stats.binomtest(x, n, p=p, alternative=alternative)
return result.pvalue
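    # a minimal usage sketch of the replacement API (numbers illustrative):
    #   res = stats.binomtest(7, n=50, p=0.1)
    #   res.pvalue                                 # two-sided p-value
    #   res.proportion_estimate                    # 7/50 = 0.14
    #   res.proportion_ci(confidence_level=0.95)   # CI for the proportion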
class TestBinomTest:
"""Tests for stats.binomtest."""
# Expected results here are from R binom.test, e.g.
# options(digits=16)
# binom.test(484, 967, p=0.48)
@pytest.mark.xfail_on_32bit("The large inputs make these tests "
"sensitive to machine epsilon level")
def test_two_sided_pvalues1(self):
# These tests work on all OS's but fail on
# Linux_Python_37_32bit_full due to numerical issues caused
# by large inputs.
rtol = 1e-13 # aarch64 observed rtol: 3.5e-13
res = stats.binomtest(10079999, 21000000, 0.48)
assert_allclose(res.pvalue, 1.0, rtol=rtol)
res = stats.binomtest(10079990, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9966892187965, rtol=rtol)
res = stats.binomtest(10080009, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9970377203856, rtol=rtol)
res = stats.binomtest(10080017, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9940754817328, rtol=1e-9)
@pytest.mark.xfail_on_32bit("The large inputs make these tests "
"sensitive to machine epsilon level")
def test_two_sided_pvalues2(self):
rtol = 1e-13 # no aarch64 failure with 1e-15, preemptive bump
res = stats.binomtest(9, n=21, p=0.48)
assert_allclose(res.pvalue, 0.6689672431939, rtol=rtol)
res = stats.binomtest(4, 21, 0.48)
assert_allclose(res.pvalue, 0.008139563452106, rtol=rtol)
res = stats.binomtest(11, 21, 0.48)
assert_allclose(res.pvalue, 0.8278629664608, rtol=rtol)
res = stats.binomtest(7, 21, 0.48)
assert_allclose(res.pvalue, 0.1966772901718, rtol=1e-12)
res = stats.binomtest(3, 10, .5)
assert_allclose(res.pvalue, 0.34375, rtol=rtol)
res = stats.binomtest(2, 2, .4)
assert_allclose(res.pvalue, 0.16, rtol=rtol)
res = stats.binomtest(2, 4, .3)
assert_allclose(res.pvalue, 0.5884, rtol=rtol)
@pytest.mark.xfail_on_32bit("The large inputs make these tests "
"sensitive to machine epsilon level")
def test_edge_cases(self):
rtol = 1e-14 # aarch64 observed rtol: 1.33e-15
res = stats.binomtest(484, 967, 0.5)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(3, 47, 3/47)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(13, 46, 13/46)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(15, 44, 15/44)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(7, 13, 0.5)
assert_allclose(res.pvalue, 1, rtol=rtol)
res = stats.binomtest(6, 11, 0.5)
assert_allclose(res.pvalue, 1, rtol=rtol)
def test_binary_srch_for_binom_tst(self):
# Test that old behavior of binomtest is maintained
# by the new binary search method in cases where d
# exactly equals the input on one side.
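        # (roughly: the old code counted the outcomes i on the opposite tail
        #  with pmf(i) <= d by summing a boolean mask, whereas the new code
        #  finds the crossover index with a binary search; both counts are
        #  compared below)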
n = 10
p = 0.5
k = 3
# First test for the case where k > mode of PMF
i = np.arange(np.ceil(p * n), n+1)
d = stats.binom.pmf(k, n, p)
# Old way of calculating y, probably consistent with R.
y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
# New way with binary search.
ix = _binary_search_for_binom_tst(lambda x1:
-stats.binom.pmf(x1, n, p),
-d, np.ceil(p * n), n)
y2 = n - ix + int(d == stats.binom.pmf(ix, n, p))
assert_allclose(y1, y2, rtol=1e-9)
# Now test for the other side.
k = 7
i = np.arange(np.floor(p * n) + 1)
d = stats.binom.pmf(k, n, p)
# Old way of calculating y.
y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
# New way with binary search.
ix = _binary_search_for_binom_tst(lambda x1:
stats.binom.pmf(x1, n, p),
d, 0, np.floor(p * n))
y2 = ix + 1
assert_allclose(y1, y2, rtol=1e-9)
# Expected results here are from R 3.6.2 binom.test
@pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
[('less', 0.148831050443,
0.0, 0.2772002496709138),
('greater', 0.9004695898947,
0.1366613252458672, 1.0),
('two-sided', 0.2983720970096,
0.1266555521019559, 0.2918426890886281)])
def test_confidence_intervals1(self, alternative, pval, ci_low, ci_high):
res = stats.binomtest(20, n=100, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-12)
assert_equal(res.proportion_estimate, 0.2)
ci = res.proportion_ci(confidence_level=0.95)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-12)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
[('less',
0.005656361, 0.0, 0.1872093),
('greater',
0.9987146, 0.008860761, 1.0),
('two-sided',
0.01191714, 0.006872485, 0.202706269)])
def test_confidence_intervals2(self, alternative, pval, ci_low, ci_high):
res = stats.binomtest(3, n=50, p=0.2, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
assert_equal(res.proportion_estimate, 0.06)
ci = res.proportion_ci(confidence_level=0.99)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_high',
[('less', 0.05631351, 0.2588656),
('greater', 1.0, 1.0),
('two-sided', 0.07604122, 0.3084971)])
def test_confidence_interval_exact_k0(self, alternative, pval, ci_high):
# Test with k=0, n = 10.
res = stats.binomtest(0, 10, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
ci = res.proportion_ci(confidence_level=0.95)
assert_equal(ci.low, 0.0)
assert_allclose(ci.high, ci_high, rtol=1e-6)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_low',
[('less', 1.0, 0.0),
('greater', 9.536743e-07, 0.7411344),
('two-sided', 9.536743e-07, 0.6915029)])
def test_confidence_interval_exact_k_is_n(self, alternative, pval, ci_low):
# Test with k = n = 10.
res = stats.binomtest(10, 10, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
ci = res.proportion_ci(confidence_level=0.95)
assert_equal(ci.high, 1.0)
assert_allclose(ci.low, ci_low, rtol=1e-6)
# Expected results are from the prop.test function in R 3.6.2.
@pytest.mark.parametrize(
'k, alternative, corr, conf, ci_low, ci_high',
[[3, 'two-sided', True, 0.95, 0.08094782, 0.64632928],
[3, 'two-sided', True, 0.99, 0.0586329, 0.7169416],
[3, 'two-sided', False, 0.95, 0.1077913, 0.6032219],
[3, 'two-sided', False, 0.99, 0.07956632, 0.6799753],
[3, 'less', True, 0.95, 0.0, 0.6043476],
[3, 'less', True, 0.99, 0.0, 0.6901811],
[3, 'less', False, 0.95, 0.0, 0.5583002],
[3, 'less', False, 0.99, 0.0, 0.6507187],
[3, 'greater', True, 0.95, 0.09644904, 1.0],
[3, 'greater', True, 0.99, 0.06659141, 1.0],
[3, 'greater', False, 0.95, 0.1268766, 1.0],
[3, 'greater', False, 0.99, 0.08974147, 1.0],
[0, 'two-sided', True, 0.95, 0.0, 0.3445372],
[0, 'two-sided', False, 0.95, 0.0, 0.2775328],
[0, 'less', True, 0.95, 0.0, 0.2847374],
[0, 'less', False, 0.95, 0.0, 0.212942],
[0, 'greater', True, 0.95, 0.0, 1.0],
[0, 'greater', False, 0.95, 0.0, 1.0],
[10, 'two-sided', True, 0.95, 0.6554628, 1.0],
[10, 'two-sided', False, 0.95, 0.7224672, 1.0],
[10, 'less', True, 0.95, 0.0, 1.0],
[10, 'less', False, 0.95, 0.0, 1.0],
[10, 'greater', True, 0.95, 0.7152626, 1.0],
[10, 'greater', False, 0.95, 0.787058, 1.0]]
)
def test_ci_wilson_method(self, k, alternative, corr, conf,
ci_low, ci_high):
res = stats.binomtest(k, n=10, p=0.1, alternative=alternative)
if corr:
method = 'wilsoncc'
else:
method = 'wilson'
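        # ('wilsoncc' is the continuity-corrected Wilson score interval,
        #  matching R's prop.test(correct=TRUE); plain 'wilson' corresponds
        #  to correct=FALSE)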
ci = res.proportion_ci(confidence_level=conf, method=method)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
def test_estimate_equals_hypothesized_prop(self):
# Test the special case where the estimated proportion equals
# the hypothesized proportion. When alternative is 'two-sided',
# the p-value is 1.
res = stats.binomtest(4, 16, 0.25)
assert_equal(res.proportion_estimate, 0.25)
assert_equal(res.pvalue, 1.0)
@pytest.mark.parametrize('k, n', [(0, 0), (-1, 2)])
def test_invalid_k_n(self, k, n):
with pytest.raises(ValueError,
match="must be an integer not less than"):
stats.binomtest(k, n)
def test_invalid_k_too_big(self):
with pytest.raises(ValueError,
match="k must not be greater than n"):
stats.binomtest(11, 10, 0.25)
def test_invalid_confidence_level(self):
res = stats.binomtest(3, n=10, p=0.1)
with pytest.raises(ValueError, match="must be in the interval"):
res.proportion_ci(confidence_level=-1)
def test_invalid_ci_method(self):
res = stats.binomtest(3, n=10, p=0.1)
with pytest.raises(ValueError, match="method must be"):
res.proportion_ci(method="plate of shrimp")
class TestFligner:
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Perturb input to break ties in the transformed data
# See https://github.com/scipy/scipy/pull/8042 for more details
rs = np.random.RandomState(123)
_perturb = lambda g: (np.asarray(g) + 1e-10*rs.randn(len(g))).tolist()
g1_ = _perturb(g1)
g2_ = _perturb(g2)
g3_ = _perturb(g3)
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean')
Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood:
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
def test_mood_alternative(self):
np.random.seed(0)
x = stats.norm.rvs(scale=0.75, size=100)
y = stats.norm.rvs(scale=1.25, size=100)
stat1, p1 = stats.mood(x, y, alternative='two-sided')
stat2, p2 = stats.mood(x, y, alternative='less')
stat3, p3 = stats.mood(x, y, alternative='greater')
assert stat1 == stat2 == stat3
assert_allclose(p1, 0, atol=1e-7)
assert_allclose(p2, p1/2)
assert_allclose(p3, 1 - p1/2)
with pytest.raises(ValueError, match="alternative must be..."):
stats.mood(x, y, alternative='ekki-ekki')
@pytest.mark.xfail(reason="SciPy needs tie correction like R (gh-13730)")
@pytest.mark.parametrize("alternative, expected",
[('two-sided', (1.037127561496, 0.299676411857)),
('less', (1.0371275614961, 0.8501617940715)),
('greater', (1.037127561496, 0.1498382059285))])
def test_mood_alternative_against_R(self, alternative, expected):
        # Test against R mood.test: https://rdrr.io/r/stats/mood.test.html
# options(digits=16)
# x <- c(111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
# 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)
# y <- c(107, 108, 106, 98, 105, 103, 110, 105, 104,
# 100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)
# mood.test(x, y, alternative='less')
x = [111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98]
y = [107, 108, 106, 98, 105, 103, 110, 105, 104, 100,
96, 108, 103, 104, 114, 114, 113, 108, 106, 99]
res = stats.mood(x, y, alternative=alternative)
assert_allclose(res, expected)
class TestProbplot:
def test_basic(self):
x = stats.norm.rvs(size=20, random_state=12345)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
x = stats.norm.rvs(size=100, random_state=123456)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
x = stats.norm.rvs(size=20, random_state=12345)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist:
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100, random_state=7654321)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
class TestWilcoxon:
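    """Tests for stats.wilcoxon: argument validation, zero-difference handling,
    approximate and exact p-values, and automatic mode selection."""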
def test_wilcoxon_bad_arg(self):
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2],
alternative="dummy")
assert_raises(ValueError, stats.wilcoxon, [1]*10, mode="xyz")
def test_zero_diff(self):
x = np.arange(20)
# pratt and wilcox do not work if x - y == 0
assert_raises(ValueError, stats.wilcoxon, x, x, "wilcox",
mode="approx")
assert_raises(ValueError, stats.wilcoxon, x, x, "pratt",
mode="approx")
# ranksum is n*(n+1)/2, split in half if zero_method == "zsplit"
assert_equal(stats.wilcoxon(x, x, "zsplit", mode="approx"),
(20*21/4, 1.0))
def test_pratt(self):
# regression test for gh-6805: p-value matches value from R package
# coin (wilcoxsign_test) reported in the issue
x = [1, 2, 3, 4]
y = [1, 2, 3, 5]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
res = stats.wilcoxon(x, y, zero_method="pratt", mode="approx")
assert_allclose(res, (0.0, 0.31731050786291415))
def test_wilcoxon_arg_type(self):
# Should be able to accept list as arguments.
# Address issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt", mode="approx")
_ = stats.wilcoxon(arr, zero_method="zsplit", mode="approx")
_ = stats.wilcoxon(arr, zero_method="wilcox", mode="approx")
def test_accuracy_wilcoxon(self):
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt", mode="approx")
assert_allclose(T, 423)
assert_allclose(p, 0.0031724568006762576)
T, p = stats.wilcoxon(x, y, "zsplit", mode="approx")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox", mode="approx")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False, mode="approx")
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True, mode="approx")
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes(self):
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False, mode="approx")
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie(self):
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10, mode="approx")
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True, mode="approx")
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
def test_onesided(self):
# tested against "R version 3.4.1 (2017-06-30)"
# x <- c(125, 115, 130, 140, 140, 115, 140, 125, 140, 135)
# y <- c(110, 122, 125, 120, 140, 124, 123, 137, 135, 145)
# cfg <- list(x = x, y = y, paired = TRUE, exact = FALSE)
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = TRUE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = TRUE)))
x = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135]
y = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less", mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.7031847, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less", correction=True,
mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.7233656, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater", mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.2968153, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater", correction=True,
mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.3176447, decimal=6)
def test_exact_basic(self):
for n in range(1, 26):
cnt = _get_wilcoxon_distr(n)
assert_equal(n*(n+1)/2 + 1, len(cnt))
assert_equal(sum(cnt), 2**n)
def test_exact_pval(self):
# expected values computed with "R version 3.4.1 (2017-06-30)"
x = np.array([1.81, 0.82, 1.56, -0.48, 0.81, 1.28, -1.04, 0.23,
-0.75, 0.14])
y = np.array([0.71, 0.65, -0.2, 0.85, -1.1, -0.45, -0.84, -0.24,
-0.68, -0.76])
_, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
assert_almost_equal(p, 0.1054688, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
assert_almost_equal(p, 0.9580078, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
assert_almost_equal(p, 0.05273438, decimal=6)
x = np.arange(0, 20) + 0.5
y = np.arange(20, 0, -1)
_, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
assert_almost_equal(p, 0.8694878, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
assert_almost_equal(p, 0.4347439, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
assert_almost_equal(p, 0.5795889, decimal=6)
d = np.arange(26) + 1
assert_raises(ValueError, stats.wilcoxon, d, mode="exact")
# These inputs were chosen to give a W statistic that is either the
# center of the distribution (when the length of the support is odd), or
# the value to the left of the center (when the length of the support is
# even). Also, the numbers are chosen so that the W statistic is the
# sum of the positive values.
@pytest.mark.parametrize('x', [[-1, -2, 3],
[-1, 2, -3, -4, 5],
[-1, -2, 3, -4, -5, -6, 7, 8]])
def test_exact_p_1(self, x):
w, p = stats.wilcoxon(x)
x = np.array(x)
wtrue = x[x > 0].sum()
assert_equal(w, wtrue)
assert_equal(p, 1)
def test_auto(self):
        # mode='auto' defaults to 'exact' if there are no ties and n <= 25
x = np.arange(0, 25) + 0.5
y = np.arange(25, 0, -1)
assert_equal(stats.wilcoxon(x, y),
stats.wilcoxon(x, y, mode="exact"))
# if there are ties (i.e. zeros in d = x-y), then switch to approx
d = np.arange(0, 13)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Exact p-value calculation")
w, p = stats.wilcoxon(d)
assert_equal(stats.wilcoxon(d, mode="approx"), (w, p))
# use approximation for samples > 25
d = np.arange(1, 27)
assert_equal(stats.wilcoxon(d), stats.wilcoxon(d, mode="approx"))
class TestKstat:
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar:
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
        assert_equal(stats.kstatvar(data), np.nan)
def test_bad_arg(self):
        # Raise ValueError if n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=500, random_state=7654321) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax:
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=7)
def test_dist(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=7)
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=7)
class TestBoxcox_llf:
def test_basic(self):
x = stats.norm.rvs(size=10000, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
def test_gh_6873(self):
# Regression test for gh-6873.
# This example was taken from gh-7534, a duplicate of gh-6873.
data = [198.0, 233.0, 233.0, 392.0]
llf = stats.boxcox_llf(-8, data)
# The expected value was computed with mpmath.
assert_allclose(llf, -17.93934208579061)
# This is the data from github user Qukaiyi, given as an example
# of a data set that caused boxcox to fail.
_boxcox_data = [
15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875,
207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660,
904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575,
68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855,
1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000,
198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051,
345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787,
57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237,
131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418,
246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193,
872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561,
483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858,
88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096,
402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232,
606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486,
95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334,
174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335,
898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524,
81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735,
132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217,
150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448,
10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175,
56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907,
244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624,
406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795,
145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075,
367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870,
55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390,
236093, 885778, 335969, 49266, 381430, 307437, 350077, 34346, 49340,
84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337,
120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375,
887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988,
509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229,
411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084,
479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426,
1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598,
1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047,
194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310,
4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793,
1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221,
266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321,
309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743,
1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201,
141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518,
2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845,
724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016,
1891609
]
class TestBoxcox:
def test_fixed_lmbda(self):
x = stats.loggamma.rvs(5, size=50, random_state=12345) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000, random_state=1245)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
rng = np.random.RandomState(1234)
x = stats.loggamma.rvs(5, size=50, random_state=rng) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
        # Try some extreme values to check that we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500, random_state=rng) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1, 2])
assert_raises(ValueError, stats.boxcox, x)
# Raise ValueError if data is constant.
assert_raises(ValueError, stats.boxcox, np.array([1]))
# Raise ValueError if data is not 1-dimensional.
assert_raises(ValueError, stats.boxcox, np.array([[1], [2]]))
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
def test_gh_6873(self):
# Regression test for gh-6873.
y, lam = stats.boxcox(_boxcox_data)
# The expected value of lam was computed with the function
# powerTransform in the R library 'car'. I trust that value
# to only about five significant digits.
assert_allclose(lam, -0.051654, rtol=1e-5)
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, bounds):
# Define custom optimizer with bounds.
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
assert bounds[0] < lmbda < bounds[1]
def test_bounded_optimizer_against_unbounded_optimizer(self):
# Test whether setting bounds on optimizer excludes solution from
# unbounded optimizer.
# Get unbounded solution.
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None)
# Set tolerance and bounds around solution.
bounds = (lmbda + 0.1, lmbda + 1)
options = {'xatol': 1e-12}
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded", options=options)
# Check bounded solution. Lower bound should be active.
_, lmbda_bounded = stats.boxcox(_boxcox_data, lmbda=None,
optimizer=optimizer)
assert lmbda_bounded != lmbda
assert_allclose(lmbda_bounded, bounds[0])
@pytest.mark.parametrize("optimizer", ["str", (1, 2), 0.1])
def test_bad_optimizer_type_raises_error(self, optimizer):
# Check if error is raised if string, tuple or float is passed
with pytest.raises(ValueError, match="`optimizer` must be a callable"):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
def test_bad_optimizer_value_raises_error(self):
# Check if error is raised if `optimizer` function does not return
# `OptimizeResult` object
# Define test function that always returns 1
def optimizer(fun):
return 1
message = "`optimizer` must return an object containing the optimal..."
with pytest.raises(ValueError, match=message):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
class TestBoxcoxNormmax:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=50, random_state=12345) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
@pytest.mark.parametrize("method", ["mle", "pearsonr", "all"])
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, method, bounds):
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
maxlog = stats.boxcox_normmax(self.x, method=method,
optimizer=optimizer)
assert np.all(bounds[0] < maxlog)
assert np.all(maxlog < bounds[1])
def test_user_defined_optimizer(self):
# tests an optimizer that is not based on scipy.optimize.minimize
lmbda = stats.boxcox_normmax(self.x)
lmbda_rounded = np.round(lmbda, 5)
lmbda_range = np.linspace(lmbda_rounded-0.01, lmbda_rounded+0.01, 1001)
class MyResult:
pass
def optimizer(fun):
# brute force minimum over the range
objs = []
for lmbda in lmbda_range:
objs.append(fun(lmbda))
res = MyResult()
res.x = lmbda_range[np.argmin(objs)]
return res
lmbda2 = stats.boxcox_normmax(self.x, optimizer=optimizer)
assert lmbda2 != lmbda # not identical
assert_allclose(lmbda2, lmbda, 1e-5) # but as close as it should be
def test_user_defined_optimizer_and_brack_raises_error(self):
optimizer = optimize.minimize_scalar
# Using default `brack=None` with user-defined `optimizer` works as
# expected.
stats.boxcox_normmax(self.x, brack=None, optimizer=optimizer)
# Using user-defined `brack` with user-defined `optimizer` is expected
# to throw an error. Instead, users should specify
# optimizer-specific parameters in the optimizer function itself.
with pytest.raises(ValueError, match="`brack` must be None if "
"`optimizer` is given"):
stats.boxcox_normmax(self.x, brack=(-2.0, 2.0),
optimizer=optimizer)
class TestBoxcoxNormplot:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=500, random_state=7654321) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestYeojohnson_llf:
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.yeojohnson_llf(1, [])))
class TestYeojohnson:
def test_fixed_lmbda(self):
rng = np.random.RandomState(12345)
# Test positive input
x = stats.loggamma.rvs(5, size=50, random_state=rng) + 5
assert np.all(x > 0)
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt, 1 - 1 / (x + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt, np.log(x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
# Test negative input
x = stats.loggamma.rvs(5, size=50, random_state=rng) - 5
assert np.all(x < 0)
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt, -np.log(-x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt, 1 / (-x + 1) - 1)
# test both positive and negative input
x = stats.loggamma.rvs(5, size=50, random_state=rng) - 2
assert not np.all(x < 0)
assert not np.all(x >= 0)
pos = x >= 0
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt[pos], np.log(x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
neg = ~pos
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt[neg], -np.log(-x[neg] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[neg], x[neg])
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)
@pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
def test_lmbda_None(self, lmbda):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
def _inverse_transform(x, lmbda):
x_inv = np.zeros(x.shape, dtype=x.dtype)
pos = x >= 0
# when x >= 0
if abs(lmbda) < np.spacing(1.):
x_inv[pos] = np.exp(x[pos]) - 1
else: # lmbda != 0
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
1 / (2 - lmbda))
else: # lmbda == 2
x_inv[~pos] = 1 - np.exp(-x[~pos])
return x_inv
n_samples = 20000
np.random.seed(1234567)
x = np.random.normal(loc=0, scale=1, size=(n_samples))
x_inv = _inverse_transform(x, lmbda)
xt, maxlog = stats.yeojohnson(x_inv)
assert_allclose(maxlog, lmbda, atol=1e-2)
assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
assert_almost_equal(0, xt.mean(), decimal=1)
assert_almost_equal(1, xt.std(), decimal=1)
def test_empty(self):
assert_(stats.yeojohnson([]).shape == (0,))
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
xt1, _ = stats.yeojohnson(x)
xt2, _ = stats.yeojohnson(list(x))
assert_allclose(xt1, xt2, rtol=1e-12)
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_input_dtype_complex(self, dtype):
x = np.arange(6, dtype=dtype)
err_msg = ('Yeo-Johnson transformation is not defined for complex '
'numbers.')
with pytest.raises(ValueError, match=err_msg):
stats.yeojohnson(x)
@pytest.mark.parametrize('dtype', [np.int8, np.uint8, np.int16, np.int32])
def test_input_dtype_integer(self, dtype):
x_int = np.arange(8, dtype=dtype)
x_float = np.arange(8, dtype=np.float64)
xt_int, lmbda_int = stats.yeojohnson(x_int)
xt_float, lmbda_float = stats.yeojohnson(x_float)
assert_allclose(xt_int, xt_float, rtol=1e-7)
assert_allclose(lmbda_int, lmbda_float, rtol=1e-7)
class TestYeojohnsonNormmax:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=50, random_state=12345) + 5
def test_mle(self):
maxlog = stats.yeojohnson_normmax(self.x)
assert_allclose(maxlog, 1.876393, rtol=1e-6)
def test_darwin_example(self):
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
lmbda = stats.yeojohnson_normmax(x)
assert np.allclose(lmbda, 1.305, atol=1e-3)
class TestCircFuncs:
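    """Tests for the circular statistics functions (circmean, circvar, circstd),
    including axis handling and the nan_policy options."""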
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 42.51955609),
(stats.circstd, 6.520702116)])
def test_circfuncs(self, test_func, expected):
x = np.array([355, 5, 2, 359, 10, 350])
assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
@pytest.mark.parametrize("test_func, numpy_func",
[(stats.circmean, np.mean),
(stats.circvar, np.var),
(stats.circstd, np.std)])
def test_circfuncs_close(self, test_func, numpy_func):
# circfuncs should handle very similar inputs (gh-12740)
x = np.array([0.12675364631578953] * 10 + [0.12675365920187928] * 100)
circstat = test_func(x)
normal = numpy_func(x)
assert_allclose(circstat, normal, atol=1e-8)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 42.51955609),
(stats.circstd, 6.520702116)])
def test_circfuncs_array_like(self, test_func, expected):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_empty(self, test_func):
assert_(np.isnan(test_func([])))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_propagate(self, test_func):
x = [355, 5, 2, 359, 10, 350, np.nan]
assert_(np.isnan(test_func(x, high=360)))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean,
{None: np.nan, 0: 355.66582264, 1: 0.28725053}),
(stats.circvar,
{None: np.nan, 0: 16.89976130, 1: 36.51366669}),
(stats.circstd,
{None: np.nan, 0: 4.11093193, 1: 6.04265394})])
def test_nan_propagate_array(self, test_func, expected):
x = np.array([[355, 5, 2, 359, 10, 350, 1],
[351, 7, 4, 352, 9, 349, np.nan],
[1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
for axis in expected.keys():
out = test_func(x, high=360, axis=axis)
if axis is None:
assert_(np.isnan(out))
else:
assert_allclose(out[0], expected[axis], rtol=1e-7)
assert_(np.isnan(out[1:]).all())
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean,
{None: 359.4178026893944,
0: np.array([353.0, 6.0, 3.0, 355.5, 9.5,
349.5]),
1: np.array([0.16769015, 358.66510252])}),
(stats.circvar,
{None: 55.362093503276725,
0: np.array([4.00081258, 1.00005077, 1.00005077,
12.25762620, 0.25000317,
0.25000317]),
1: np.array([42.51955609, 67.09872148])}),
(stats.circstd,
{None: 7.440570778057074,
0: np.array([2.00020313, 1.00002539, 1.00002539,
3.50108929, 0.50000317,
0.50000317]),
1: np.array([6.52070212, 8.19138093])})])
def test_nan_omit_array(self, test_func, expected):
x = np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, 9, 349, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
for axis in expected.keys():
out = test_func(x, high=360, nan_policy='omit', axis=axis)
if axis is None:
assert_allclose(out, expected[axis], rtol=1e-7)
else:
assert_allclose(out[:-1], expected[axis], rtol=1e-7)
assert_(np.isnan(out[-1]))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 42.51955609),
(stats.circstd, 6.520702116)])
def test_nan_omit(self, test_func, expected):
x = [355, 5, 2, 359, 10, 350, np.nan]
assert_allclose(test_func(x, high=360, nan_policy='omit'),
expected, rtol=1e-7)
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_omit_all(self, test_func):
x = [np.nan, np.nan, np.nan, np.nan, np.nan]
assert_(np.isnan(test_func(x, nan_policy='omit')))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_omit_all_axis(self, test_func):
x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]])
out = test_func(x, nan_policy='omit', axis=1)
assert_(np.isnan(out).all())
assert_(len(out) == 2)
@pytest.mark.parametrize("x",
[[355, 5, 2, 359, 10, 350, np.nan],
np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, np.nan, 9, 349]])])
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_raise(self, test_func, x):
assert_raises(ValueError, test_func, x, high=360, nan_policy='raise')
@pytest.mark.parametrize("x",
[[355, 5, 2, 359, 10, 350, np.nan],
np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, np.nan, 9, 349]])])
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_bad_nan_policy(self, test_func, x):
assert_raises(ValueError, test_func, x, high=360, nan_policy='foobar')
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
    def test_circfuncs_uint8(self):
# regression test for gh-7255: overflow when working with
# numpy uint8 data type
x = np.array([150, 10], dtype='uint8')
assert_equal(stats.circmean(x, high=180), 170.0)
assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7)
assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)
class TestMedianTest:
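    """Tests for stats.median_test: input validation, tie handling, nan_policy
    options, and consistency with chi2_contingency."""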
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
        # This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
        assert_equal(t, np.array([[0, 2], [2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/types/test_dtypes.py | 7 | 13569 | # -*- coding: utf-8 -*-
from itertools import product
import nose
import numpy as np
import pandas as pd
from pandas import Series, Categorical, date_range
from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype
from pandas.types.common import (is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
is_period_dtype, is_period,
is_dtype_equal, is_datetime64_ns_dtype,
is_datetime64_dtype, is_string_dtype,
_coerce_to_dtype)
import pandas.util.testing as tm
_multiprocess_can_split_ = True
class Base(object):
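    """Checks shared by the dtype test cases below; each subclass creates
    self.dtype in its setUp method."""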
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
        self.assertFalse(self.dtype == 'foo')
self.assertFalse(is_dtype_equal(self.dtype, np.int64))
def test_numpy_informed(self):
# np.dtype doesn't know about our new dtype
def f():
np.dtype(self.dtype)
self.assertRaises(TypeError, f)
self.assertNotEqual(self.dtype, np.str_)
self.assertNotEqual(np.str_, self.dtype)
def test_pickle(self):
result = self.round_trip_pickle(self.dtype)
self.assertEqual(result, self.dtype)
class TestCategoricalDtype(Base, tm.TestCase):
def setUp(self):
self.dtype = CategoricalDtype()
def test_hash_vs_equality(self):
        # make sure that we satisfy `is` semantics (identity)
dtype = self.dtype
dtype2 = CategoricalDtype()
self.assertTrue(dtype == dtype2)
self.assertTrue(dtype2 == dtype)
self.assertTrue(dtype is dtype2)
self.assertTrue(dtype2 is dtype)
self.assertTrue(hash(dtype) == hash(dtype2))
def test_equality(self):
self.assertTrue(is_dtype_equal(self.dtype, 'category'))
self.assertTrue(is_dtype_equal(self.dtype, CategoricalDtype()))
self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
self.assertTrue(is_dtype_equal(self.dtype, result))
self.assertRaises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_is_dtype(self):
self.assertTrue(CategoricalDtype.is_dtype(self.dtype))
self.assertTrue(CategoricalDtype.is_dtype('category'))
self.assertTrue(CategoricalDtype.is_dtype(CategoricalDtype()))
self.assertFalse(CategoricalDtype.is_dtype('foo'))
self.assertFalse(CategoricalDtype.is_dtype(np.float64))
def test_basic(self):
self.assertTrue(is_categorical_dtype(self.dtype))
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
self.assertTrue(is_categorical_dtype(s.dtype))
self.assertTrue(is_categorical_dtype(s))
self.assertFalse(is_categorical_dtype(np.dtype('float64')))
self.assertTrue(is_categorical(s.dtype))
self.assertTrue(is_categorical(s))
self.assertFalse(is_categorical(np.dtype('float64')))
self.assertFalse(is_categorical(1.0))
class TestDatetimeTZDtype(Base, tm.TestCase):
def setUp(self):
self.dtype = DatetimeTZDtype('ns', 'US/Eastern')
def test_hash_vs_equality(self):
        # make sure that we satisfy `is` semantics (identity)
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
self.assertTrue(dtype == dtype2)
self.assertTrue(dtype2 == dtype)
self.assertTrue(dtype3 == dtype)
self.assertTrue(dtype is dtype2)
self.assertTrue(dtype2 is dtype)
self.assertTrue(dtype3 is dtype)
self.assertTrue(hash(dtype) == hash(dtype2))
self.assertTrue(hash(dtype) == hash(dtype3))
def test_construction(self):
self.assertRaises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype('datetime64[ns, CET]')
self.assertTrue(issubclass(type(a), type(a)))
self.assertTrue(issubclass(type(a), type(b)))
def test_coerce_to_dtype(self):
self.assertEqual(_coerce_to_dtype('datetime64[ns, US/Eastern]'),
DatetimeTZDtype('ns', 'US/Eastern'))
self.assertEqual(_coerce_to_dtype('datetime64[ns, Asia/Tokyo]'),
DatetimeTZDtype('ns', 'Asia/Tokyo'))
def test_compat(self):
self.assertFalse(is_datetime64_ns_dtype(self.dtype))
self.assertFalse(is_datetime64_ns_dtype('datetime64[ns, US/Eastern]'))
self.assertFalse(is_datetime64_dtype(self.dtype))
self.assertFalse(is_datetime64_dtype('datetime64[ns, US/Eastern]'))
def test_construction_from_string(self):
result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
self.assertTrue(is_dtype_equal(self.dtype, result))
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
self.assertTrue(is_dtype_equal(self.dtype, result))
self.assertRaises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_is_dtype(self):
self.assertTrue(DatetimeTZDtype.is_dtype(self.dtype))
self.assertTrue(DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]'))
self.assertFalse(DatetimeTZDtype.is_dtype('foo'))
self.assertTrue(DatetimeTZDtype.is_dtype(DatetimeTZDtype(
'ns', 'US/Pacific')))
self.assertFalse(DatetimeTZDtype.is_dtype(np.float64))
def test_equality(self):
self.assertTrue(is_dtype_equal(self.dtype,
'datetime64[ns, US/Eastern]'))
self.assertTrue(is_dtype_equal(self.dtype, DatetimeTZDtype(
'ns', 'US/Eastern')))
self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
self.assertFalse(is_dtype_equal(self.dtype, DatetimeTZDtype('ns',
'CET')))
self.assertFalse(is_dtype_equal(
DatetimeTZDtype('ns', 'US/Eastern'), DatetimeTZDtype(
'ns', 'US/Pacific')))
# numpy compat
self.assertTrue(is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]"))
def test_basic(self):
self.assertTrue(is_datetime64tz_dtype(self.dtype))
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
self.assertTrue(is_datetime64tz_dtype(s.dtype))
self.assertTrue(is_datetime64tz_dtype(s))
self.assertFalse(is_datetime64tz_dtype(np.dtype('float64')))
self.assertFalse(is_datetime64tz_dtype(1.0))
self.assertTrue(is_datetimetz(s))
self.assertTrue(is_datetimetz(s.dtype))
self.assertFalse(is_datetimetz(np.dtype('float64')))
self.assertFalse(is_datetimetz(1.0))
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
self.assertTrue(is_datetimetz(s1))
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
self.assertTrue(is_datetimetz(s2))
self.assertEqual(s1.dtype, s2.dtype)
def test_parser(self):
# pr #11245
for tz, constructor in product(('UTC', 'US/Eastern'),
('M8', 'datetime64')):
self.assertEqual(
DatetimeTZDtype('%s[ns, %s]' % (constructor, tz)),
DatetimeTZDtype('ns', tz),
)
def test_empty(self):
dt = DatetimeTZDtype()
with tm.assertRaises(AttributeError):
str(dt)
class TestPeriodDtype(Base, tm.TestCase):
def setUp(self):
self.dtype = PeriodDtype('D')
def test_construction(self):
with tm.assertRaises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
self.assertEqual(dt.freq, pd.tseries.offsets.Day())
self.assertTrue(is_period_dtype(dt))
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
self.assertEqual(dt.freq, pd.tseries.offsets.Day(3))
self.assertTrue(is_period_dtype(dt))
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
self.assertEqual(dt.freq, pd.tseries.offsets.Hour(26))
self.assertTrue(is_period_dtype(dt))
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
self.assertTrue(issubclass(type(a), type(a)))
self.assertTrue(issubclass(type(a), type(b)))
def test_identity(self):
self.assertEqual(PeriodDtype('period[D]'),
PeriodDtype('period[D]'))
self.assertIs(PeriodDtype('period[D]'),
PeriodDtype('period[D]'))
self.assertEqual(PeriodDtype('period[3D]'),
PeriodDtype('period[3D]'))
self.assertIs(PeriodDtype('period[3D]'),
PeriodDtype('period[3D]'))
self.assertEqual(PeriodDtype('period[1S1U]'),
PeriodDtype('period[1000001U]'))
self.assertIs(PeriodDtype('period[1S1U]'),
PeriodDtype('period[1000001U]'))
def test_coerce_to_dtype(self):
self.assertEqual(_coerce_to_dtype('period[D]'),
PeriodDtype('period[D]'))
self.assertEqual(_coerce_to_dtype('period[3M]'),
PeriodDtype('period[3M]'))
def test_compat(self):
self.assertFalse(is_datetime64_ns_dtype(self.dtype))
self.assertFalse(is_datetime64_ns_dtype('period[D]'))
self.assertFalse(is_datetime64_dtype(self.dtype))
self.assertFalse(is_datetime64_dtype('period[D]'))
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
self.assertTrue(is_dtype_equal(self.dtype, result))
result = PeriodDtype.construct_from_string('period[D]')
self.assertTrue(is_dtype_equal(self.dtype, result))
with tm.assertRaises(TypeError):
PeriodDtype.construct_from_string('foo')
with tm.assertRaises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with tm.assertRaises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with tm.assertRaises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with tm.assertRaises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
self.assertTrue(PeriodDtype.is_dtype(self.dtype))
self.assertTrue(PeriodDtype.is_dtype('period[D]'))
self.assertTrue(PeriodDtype.is_dtype('period[3D]'))
self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('3D')))
self.assertTrue(PeriodDtype.is_dtype('period[U]'))
self.assertTrue(PeriodDtype.is_dtype('period[S]'))
self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('U')))
self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('S')))
self.assertFalse(PeriodDtype.is_dtype('D'))
self.assertFalse(PeriodDtype.is_dtype('3D'))
self.assertFalse(PeriodDtype.is_dtype('U'))
self.assertFalse(PeriodDtype.is_dtype('S'))
self.assertFalse(PeriodDtype.is_dtype('foo'))
self.assertFalse(PeriodDtype.is_dtype(np.object_))
self.assertFalse(PeriodDtype.is_dtype(np.int64))
self.assertFalse(PeriodDtype.is_dtype(np.float64))
def test_equality(self):
self.assertTrue(is_dtype_equal(self.dtype, 'period[D]'))
self.assertTrue(is_dtype_equal(self.dtype, PeriodDtype('D')))
self.assertTrue(is_dtype_equal(self.dtype, PeriodDtype('D')))
self.assertTrue(is_dtype_equal(PeriodDtype('D'), PeriodDtype('D')))
self.assertFalse(is_dtype_equal(self.dtype, 'D'))
self.assertFalse(is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D')))
def test_basic(self):
self.assertTrue(is_period_dtype(self.dtype))
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
self.assertTrue(is_period_dtype(pidx.dtype))
self.assertTrue(is_period_dtype(pidx))
self.assertTrue(is_period(pidx))
s = Series(pidx, name='A')
# dtypes
# series results in object dtype currently,
# is_period checks period_arraylike
self.assertFalse(is_period_dtype(s.dtype))
self.assertFalse(is_period_dtype(s))
self.assertTrue(is_period(s))
self.assertFalse(is_period_dtype(np.dtype('float64')))
self.assertFalse(is_period_dtype(1.0))
self.assertFalse(is_period(np.dtype('float64')))
self.assertFalse(is_period(1.0))
def test_empty(self):
dt = PeriodDtype()
with tm.assertRaises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
self.assertFalse(is_string_dtype(PeriodDtype('D')))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
spallavolu/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
hkropp/incubator-zeppelin | spark/src/main/resources/python/zeppelin_pyspark.py | 16 | 12106 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
import ast
import warnings
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row
class Logger(object):
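    """Minimal file-like object that forwards writes to the interpreter's output
    buffer; it replaces sys.stdout and sys.stderr below."""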
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(dict):
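    """Python-side wrapper around the JVM ZeppelinContext (self.z), exposing
    dict-style access to shared values plus display, dynamic-form, hook and
    matplotlib helpers."""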
def __init__(self, zc):
self.z = zc
self._displayhook = lambda *args: None
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print(self.z.showData(obj._jdf))
else:
print(str(obj))
    # Implementing these special methods makes operating on the context more Pythonic
def __setitem__(self, key, item):
self.z.put(key, item)
def __getitem__(self, key):
return self.z.get(key)
def __delitem__(self, key):
self.z.remove(key)
def __contains__(self, item):
return self.z.containsKey(item)
def add(self, key, value):
self.__setitem__(key, value)
def put(self, key, value):
self.__setitem__(key, value)
def get(self, key):
return self.__getitem__(key)
def getInterpreterContext(self):
return self.z.getInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def select(self, name, options, defaultValue=""):
# auto_convert to ArrayList doesn't match the method signature on JVM side
tuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
iterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(tuples)
return self.z.select(name, defaultValue, iterables)
def checkbox(self, name, options, defaultChecked=None):
if defaultChecked is None:
defaultChecked = []
optionTuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
optionIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(optionTuples)
defaultCheckedIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(defaultChecked)
checkedItems = gateway.jvm.scala.collection.JavaConversions.seqAsJavaList(self.z.checkbox(name, defaultCheckedIterables, optionIterables))
result = []
for checkedItem in checkedItems:
result.append(checkedItem)
        return result
def registerHook(self, event, cmd, replName=None):
if replName is None:
self.z.registerHook(event, cmd)
else:
self.z.registerHook(event, cmd, replName)
def unregisterHook(self, event, replName=None):
if replName is None:
self.z.unregisterHook(event)
else:
self.z.unregisterHook(event, replName)
def getHook(self, event, replName=None):
if replName is None:
return self.z.getHook(event)
return self.z.getHook(event, replName)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72, fontsize=10,
interactive=True, format='png', context=self.z)
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def __tupleToScalaTuple2(self, tuple):
if (len(tuple) == 2):
return gateway.jvm.scala.Tuple2(tuple[0], tuple[1])
else:
raise IndexError("options must be a list of tuple of 2")
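# Illustrative usage of the context object from a Zeppelin note (a sketch, not part of
# this bootstrap script; it assumes the `z` instance bound further below and a live
# interpreter session):
#   z.put("n_rows", 1000)              # share a value with other interpreters
#   n = z.get("n_rows")                # read it back; dict-style z["n_rows"] also works
#   mode = z.select("mode", [("fast", "Fast"), ("full", "Full")], "fast")
#   z.show(some_spark_dataframe)       # render a pyspark DataFrame via Zeppelin's table display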
class SparkVersion(object):
SPARK_1_4_0 = 10400
SPARK_1_3_0 = 10300
SPARK_2_0_0 = 20000
def __init__(self, versionNumber):
self.version = versionNumber
def isAutoConvertEnabled(self):
return self.version >= self.SPARK_1_4_0
def isImportAllPackageUnderSparkSql(self):
return self.version >= self.SPARK_1_3_0
def isSpark2(self):
return self.version >= self.SPARK_2_0_0
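# The version number passed in from the JVM side appears to encode
# major*10000 + minor*100 + patch, e.g. Spark 2.1.3 -> 20103 (consistent with the constants above).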
class PySparkCompletion:
def __init__(self, interpreterObject):
self.interpreterObject = interpreterObject
def getGlobalCompletion(self):
objectDefList = []
try:
for completionItem in list(globals().keys()):
objectDefList.append(completionItem)
except:
return None
else:
return objectDefList
def getMethodCompletion(self, text_value):
execResult = locals()
        if text_value is None:
return None
completion_target = text_value
try:
if len(completion_target) <= 0:
return None
if text_value[-1] == ".":
completion_target = text_value[:-1]
exec("{} = dir({})".format("objectDefList", completion_target), globals(), execResult)
except:
return None
else:
return list(execResult['objectDefList'])
def getCompletion(self, text_value):
completionList = set()
globalCompletionList = self.getGlobalCompletion()
        if globalCompletionList is not None:
for completionItem in list(globalCompletionList):
completionList.add(completionItem)
        if text_value is not None:
objectCompletionList = self.getMethodCompletion(text_value)
            if objectCompletionList is not None:
for completionItem in list(objectCompletionList):
completionList.add(completionItem)
if len(completionList) <= 0:
self.interpreterObject.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreterObject.setStatementsFinished(result, False)
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = SparkVersion(int(sys.argv[2]))
if sparkVersion.isSpark2():
from pyspark.sql import SparkSession
else:
from pyspark.sql import SchemaRDD
if sparkVersion.isAutoConvertEnabled():
gateway = JavaGateway(client, auto_convert = True)
else:
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
output = Logger()
sys.stdout = output
sys.stderr = output
intp.onPythonScriptInitialized(os.getpid())
jsc = intp.getJavaSparkContext()
if sparkVersion.isImportAllPackageUnderSparkSql():
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
else:
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
java_import(gateway.jvm, "scala.Tuple2")
_zcUserQueryNameSpace = {}
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
_zcUserQueryNameSpace["_zsc_"] = _zsc_
_zcUserQueryNameSpace["sc"] = sc
if sparkVersion.isSpark2():
spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
sqlc = __zSqlc__ = __zSpark__._wrapped
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = __zSqlc__
_zcUserQueryNameSpace["spark"] = spark
_zcUserQueryNameSpace["__zSpark__"] = __zSpark__
else:
sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = sqlc
sqlContext = __zSqlc__
_zcUserQueryNameSpace["sqlContext"] = sqlContext
completion = __zeppelin_completion__ = PySparkCompletion(intp)
_zcUserQueryNameSpace["completion"] = completion
_zcUserQueryNameSpace["__zeppelin_completion__"] = __zeppelin_completion__
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
while True:
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
jobGroup = req.jobGroup()
jobDesc = req.jobDescription()
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
sc.setJobGroup(jobGroup, jobDesc)
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
zycdragonball/tensorflow | tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
spallavolu/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
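# Not part of the original example: FeatureUnion can also rescale the individual
# feature blocks via its transformer_weights parameter. A minimal sketch re-using
# the transformers defined above:
weighted_features = FeatureUnion([("pca", pca), ("univ_select", selection)],
                                 transformer_weights={"pca": 1.0, "univ_select": 0.5})
X_weighted = weighted_features.fit(X, y).transform(X)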
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/io/msgpack/test_sequnpack.py | 14 | 3074 | # coding: utf-8
from pandas import compat
from pandas.io.msgpack import Unpacker, BufferFull
from pandas.io.msgpack import OutOfData
import pytest
import pandas.util.testing as tm
class TestPack(object):
def test_partial_data(self):
unpacker = Unpacker()
msg = "No more data to unpack"
for data in [b"\xa5", b"h", b"a", b"l", b"l"]:
unpacker.feed(data)
with tm.assert_raises_regex(StopIteration, msg):
next(iter(unpacker))
unpacker.feed(b"o")
assert next(iter(unpacker)) == b"hallo"
def test_foobar(self):
unpacker = Unpacker(read_size=3, use_list=1)
unpacker.feed(b'foobar')
assert unpacker.unpack() == ord(b'f')
assert unpacker.unpack() == ord(b'o')
assert unpacker.unpack() == ord(b'o')
assert unpacker.unpack() == ord(b'b')
assert unpacker.unpack() == ord(b'a')
assert unpacker.unpack() == ord(b'r')
pytest.raises(OutOfData, unpacker.unpack)
unpacker.feed(b'foo')
unpacker.feed(b'bar')
k = 0
for o, e in zip(unpacker, 'foobarbaz'):
assert o == ord(e)
k += 1
assert k == len(b'foobar')
def test_foobar_skip(self):
unpacker = Unpacker(read_size=3, use_list=1)
unpacker.feed(b'foobar')
assert unpacker.unpack() == ord(b'f')
unpacker.skip()
assert unpacker.unpack() == ord(b'o')
unpacker.skip()
assert unpacker.unpack() == ord(b'a')
unpacker.skip()
pytest.raises(OutOfData, unpacker.unpack)
def test_maxbuffersize(self):
pytest.raises(ValueError, Unpacker, read_size=5, max_buffer_size=3)
unpacker = Unpacker(read_size=3, max_buffer_size=3, use_list=1)
unpacker.feed(b'fo')
pytest.raises(BufferFull, unpacker.feed, b'ob')
unpacker.feed(b'o')
assert ord('f') == next(unpacker)
unpacker.feed(b'b')
assert ord('o') == next(unpacker)
assert ord('o') == next(unpacker)
assert ord('b') == next(unpacker)
def test_readbytes(self):
unpacker = Unpacker(read_size=3)
unpacker.feed(b'foobar')
assert unpacker.unpack() == ord(b'f')
assert unpacker.read_bytes(3) == b'oob'
assert unpacker.unpack() == ord(b'a')
assert unpacker.unpack() == ord(b'r')
# Test buffer refill
unpacker = Unpacker(compat.BytesIO(b'foobar'), read_size=3)
assert unpacker.unpack() == ord(b'f')
assert unpacker.read_bytes(3) == b'oob'
assert unpacker.unpack() == ord(b'a')
assert unpacker.unpack() == ord(b'r')
def test_issue124(self):
unpacker = Unpacker()
unpacker.feed(b'\xa1?\xa1!')
assert tuple(unpacker) == (b'?', b'!')
assert tuple(unpacker) == ()
unpacker.feed(b"\xa1?\xa1")
assert tuple(unpacker) == (b'?', )
assert tuple(unpacker) == ()
unpacker.feed(b"!")
assert tuple(unpacker) == (b'!', )
assert tuple(unpacker) == ()
| bsd-3-clause |
aspiringguru/sentexTuts | PracMachLrng/sentex_ML_demo14_K_nearest_Neighbours.py | 1 | 3938 | '''
Creating Our K Nearest Neighbors Algorithm - Practical Machine Learning with Python p.16 & 17
https://youtu.be/n3RqsMz3-0A?list=PLQVvvaa0QuDfKTOs3Keq_kaG2P55YRn5v
K nearest neighbours classification compares the query point against _ALL_ stored points,
so its big-O cost is high: it is slow and does not scale well.
https://en.wikipedia.org/wiki/Euclidean_distance
The distance (d) from p to q, or from q to p, is given by the Pythagorean formula:
d(q,p) = d(p,q) = sqrt( (q1-p1)^2 + (q2-p2)^2 + .... + (qn-pn)^2)
[recall the hypotenuse of a right triangle: h = sqrt(x^2 + y^2), where x & y are the two perpendicular sides.]
euclidean distance = sqrt(Sum [i=1 to n] (qi - pi)^2)
'''
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import warnings
from matplotlib import style
from collections import Counter
style.use("fivethirtyeight")
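# quick check (not in the original tutorial): the hand-written 2D formula and
# np.linalg.norm agree on a small example
_p, _q = np.array([1, 2]), np.array([4, 6])
assert abs(sqrt((_q[0]-_p[0])**2 + (_q[1]-_p[1])**2) - np.linalg.norm(_q - _p)) < 1e-12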
dataset = { 'k': [[1,2], [2,3], [3,1]], 'r':[[6,5], [7,7], [8,6]]}
print ("type(dataset)=", type(dataset))
#added extra test points to demonstrate the k_nearest_neighbors result.
new_features = [5,7]
new_features2 = [2,4]
new_features3 = [4,4]
def k_nearest_neighbors(data, predict, k=3):
if len(data) >= k:
warnings.warn("K is set to a value less than total voting groups. IDIOT!!")
distances = []
for group in data:
for features in data[group]:
#euclidian_distance = sqrt( (features[0]-predict[0])**2 + (features[1]-predict[1])**2 )
            # this is not fast: iterating through the list of lists is O(n^2) - bad.
            # the commented-out formula is also 2D only; we often need N dimensions, which np.linalg.norm handles.
euclidian_distance = np.linalg.norm(np.array(features)-np.array(predict))
distances.append([euclidian_distance, group])
votes = [i[1] for i in sorted(distances)[:k]]
    #each i is a [distance, group] pair, so i[1] is the group label of one of the k closest points
    #[:k] = subsetting the sorted list from the start up to the k-th element
    #the one liner above would be equivalent to:
    # votes = []
    # for i in sorted(distances)[:k]:
    #     votes.append(i[1])
vote_result = Counter(votes).most_common(1)[0][0]
print ("type(Counter(votes).most_common(1))=", type(Counter(votes).most_common(1)) )
print ("type(Counter(votes).most_common(1)[0])=", type(Counter(votes).most_common(1)[0]) )
print ("Counter(votes).most_common(1)=", Counter(votes).most_common(1))
    #Counter(votes).most_common(1) is a list containing a single (value, count) tuple.
#we only want the most common result. most_common(1)
return vote_result
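# a vectorized alternative (a sketch, not part of the original tutorial): computes all
# distances for one group in a single numpy call instead of the inner Python loop above
def k_nearest_neighbors_vectorized(data, predict, k=3):
    if len(data) >= k:
        warnings.warn("K is set to a value less than total voting groups.")
    distances = []
    for group in data:
        # broadcast the query point against every stored point of this group at once
        dists = np.linalg.norm(np.array(data[group]) - np.array(predict), axis=1)
        distances.extend([[d, group] for d in dists])
    votes = [i[1] for i in sorted(distances)[:k]]
    return Counter(votes).most_common(1)[0][0]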
result = k_nearest_neighbors(dataset, new_features, k=3)
print ("result=", result)
[ [plt.scatter(ii[0], ii[1], s=100, color=i) for ii in dataset[i] ] for i in dataset]
#plt.scatter(new_features[0], new_features[1], s=100, color='y', marker='s' )
plt.scatter(new_features[0], new_features[1], s=100, color=result, marker='s' )
#now classify second point.
result = k_nearest_neighbors(dataset, new_features2, k=3)
plt.scatter(new_features2[0], new_features2[1], s=100, color=result, marker='s' )
result = k_nearest_neighbors(dataset, new_features3, k=3)
plt.scatter(new_features3[0], new_features3[1], s=100, color=result, marker='s' )
new_features4 = [4,5]
result = k_nearest_neighbors(dataset, new_features4, k=3)
plt.scatter(new_features4[0], new_features4[1], s=100, color=result, marker='s' )
plt.show()
'''
http://matplotlib.org/api/markers_api.html
https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html
numpy.linalg.norm(x, ord=None, axis=None, keepdims=False)[source]
Matrix or vector norm.
This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms
(described below), depending on the value of the ord parameter.
https://docs.python.org/3/howto/sorting.html
https://docs.python.org/3/library/functions.html#sorted
https://docs.python.org/2/library/collections.html#collections.Counter
most_common([n])
Return a list of the n most common elements and their counts from the most common to the least.
''' | mit |
tawsifkhan/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic-regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
maxentile/advanced-ml-project | scripts/synthetic_data.py | 1 | 1821 | from sklearn.neighbors import KernelDensity
import numpy as np
import numpy.random as npr
import itertools
def hypercube(ndim=2):
corners = [-1,1]
corner_list = [corners for _ in np.arange(ndim)]
return np.array([i for i in itertools.product(*corner_list)])
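# e.g. hypercube(2) -> array([[-1, -1], [-1, 1], [1, -1], [1, 1]]),
# i.e. the 2**ndim corners of the [-1, 1]^ndim cube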
def generate_n_blobs(num_samples=5000, nblobs=10,separation=8,ndim=2):
centers = np.random.rand(nblobs, ndim)
centers *= separation
kde = KernelDensity()
kde.fit(centers)
samples = kde.sample(num_samples)
density = np.exp(kde.score_samples(samples))
return samples,density
def generate_blobs(num_samples=5000,separation=8,ndim=2):
# centers = np.array([[0,0],[1,0],[0,1],[1,1]],dtype=float)
# centers -= 0.5
# centers = np.vstack((centers,#centers*2,centers*3,
#centers*4,centers*5,centers*6,
#centers*7,centers*8,centers*9,
#centers*10,centers*11,centers*12,
#centers*13,centers*14,
# [0,0]))
centers = hypercube(ndim)
centers *= separation
kde = KernelDensity()
kde.fit(centers)
samples = kde.sample(num_samples)
density = np.exp(kde.score_samples(samples))
return samples,density
def generate_branches(num_samples=5000,
branch_width=5.0,
branch_length=5,
ndim=2):
h_corners = np.array(hypercube(ndim))*branch_length
branches = np.array(h_corners)
for i in range(20):
branches = np.vstack((branches, h_corners*(i+2)))
branches = np.vstack((branches,np.zeros(branches.shape[1])))
kde = KernelDensity(bandwidth=branch_width)
kde.fit(branches)
samples = kde.sample(num_samples)
density = np.exp(kde.score_samples(samples))
return samples,density
| mit |
rahuldhote/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
davidsamu/seal | seal/analysis/updown.py | 1 | 2020 | # -*- coding: utf-8 -*-
"""
Functions to perform up-down state analysis.
@author: David Samu
"""
import numpy as np
import pandas as pd
import neo
from quantities import s, ms
from elephant.statistics import time_histogram
from seal.util import util, ua_query, constants
def combine_units(spk_trs):
"""Combine spikes across units into a single spike train."""
t_start = spk_trs[0].t_start
t_stop = spk_trs[0].t_stop
comb_spks = np.sort(np.concatenate([np.array(spk_tr)
for spk_tr in spk_trs]))
comb_spk_tr = neo.core.SpikeTrain(comb_spks*s, t_start=t_start,
t_stop=t_stop)
return comb_spk_tr
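# Hedged usage sketch (hypothetical spike trains, not from the original analysis):
#   tr1 = neo.core.SpikeTrain([0.1, 0.5, 0.9]*s, t_start=0*s, t_stop=1*s)
#   tr2 = neo.core.SpikeTrain([0.2, 0.6]*s, t_start=0*s, t_stop=1*s)
#   combine_units([tr1, tr2])  # one SpikeTrain with all five spikes merged and sorted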
def get_spike_times(UA, recs, task, tr_prd='whole trial', ref_ev='S1 on'):
"""Return spike times ."""
# Init.
if recs is None:
recs = UA.recordings()
    # Get spike times for the period for each recording.
lSpikes = []
for rec in recs:
print(rec)
uids = UA.utids(tasks=[task], recs=[rec]).droplevel('task')
spks = ua_query.get_spike_times(UA, rec, task, uids, tr_prd, ref_ev)
lSpikes.append(spks)
Spikes = pd.concat(lSpikes)
return Spikes
def get_binned_spk_cnts(comb_spk_tr, prd, binsize):
"""
Return binned spike counts during period for spike train
    (typically combined across units).
"""
tstart, tstop = constants.fixed_tr_prds.loc[prd]
# Calculate binned spike counts.
lspk_cnt = [np.array(time_histogram([spk_tr], binsize, tstart,
min(tstop, spk_tr.t_stop)))[:, 0]
for spk_tr in comb_spk_tr]
tvec = util.quantity_arange(tstart, tstop, binsize).rescale(ms)
# Deal with varying number of bins.
idxs = range(np.array([len(sc) for sc in lspk_cnt]).min())
lspk_cnt = [sc[idxs] for sc in lspk_cnt]
tvec = tvec[idxs]
# Create trial x time bin spike count DF.
spk_cnt = pd.DataFrame(np.array(lspk_cnt), columns=np.array(tvec))
return spk_cnt
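# Hedged usage sketch (hypothetical arguments, not from the original analysis):
# given a list of combined spike trains and a period name that is an index of
# constants.fixed_tr_prds, e.g.
#   binned = get_binned_spk_cnts(list_of_comb_spk_trs, some_prd_name, 20*ms)
# returns a trial x time-bin DataFrame of spike counts.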
| gpl-3.0 |
pkruskal/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
ldirer/scikit-learn | sklearn/setup.py | 69 | 3201 | import os
from os.path import join
import warnings
from sklearn._build_utils import maybe_cythonize_extensions
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
# submodules with build utilities
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
# submodules which do not have their own setup.py
# we must manually add sub-submodules & tests
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('cross_decomposition/tests')
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
config.add_subpackage('neural_network')
config.add_subpackage('neural_network/tests')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('semi_supervised')
config.add_subpackage('semi_supervised/tests')
# submodules which have their own setup.py
# leave out "linear_model" and "utils" for now; add them after cblas below
config.add_subpackage('cluster')
config.add_subpackage('datasets')
config.add_subpackage('decomposition')
config.add_subpackage('ensemble')
config.add_subpackage('externals')
config.add_subpackage('feature_extraction')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('metrics/cluster')
config.add_subpackage('neighbors')
config.add_subpackage('tree')
config.add_subpackage('svm')
# add cython extension module for isotonic regression
config.add_extension('_isotonic',
sources=['_isotonic.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
maybe_cythonize_extensions(top_path, config)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
pprett/statsmodels | statsmodels/sandbox/distributions/examples/ex_mvelliptical.py | 1 | 5110 | # -*- coding: utf-8 -*-
"""examples for multivariate normal and t distributions
Created on Fri Jun 03 16:00:26 2011
@author: josef
for comparison I used R mvtnorm version 0.9-96
"""
import numpy as np
import statsmodels.sandbox.distributions.mv_normal as mvd
from numpy.testing import assert_array_almost_equal
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
#************** multivariate normal distribution ***************
mvn3 = mvd.MVNormal(mu, cov3)
#compare with random sample
x = mvn3.rvs(size=1000000)
xli = [[2., 1., 1.5],
[0., 2., 1.5],
[1.5, 1., 2.5],
[0., 1., 1.5]]
xliarr = np.asarray(xli).T[None,:, :]
#from R session
#pmvnorm(lower=-Inf,upper=(x[0,.]-mu)/sqrt(diag(cov3)),mean=rep(0,3),corr3)
r_cdf = [0.3222292, 0.3414643, 0.5450594, 0.3116296]
r_cdf_errors = [1.715116e-05, 1.590284e-05, 5.356471e-05, 3.567548e-05]
n_cdf = [mvn3.cdf(a) for a in xli]
assert_array_almost_equal(r_cdf, n_cdf, decimal=4)
print n_cdf
print
print (x<np.array(xli[0])).all(-1).mean(0)
print (x[...,None]<xliarr).all(1).mean(0)
print mvn3.expect_mc(lambda x: (x<xli[0]).all(-1), size=100000)
print mvn3.expect_mc(lambda x: (x[...,None]<xliarr).all(1), size=100000)
#other methods
mvn3n = mvn3.normalized()
assert_array_almost_equal(mvn3n.cov, mvn3n.corr, decimal=15)
assert_array_almost_equal(mvn3n.mean, np.zeros(3), decimal=15)
xn = mvn3.normalize(x)
xn_cov = np.cov(xn, rowvar=0)
assert_array_almost_equal(mvn3n.cov, xn_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xn.mean(0), decimal=2)
mvn3n2 = mvn3.normalized2()
assert_array_almost_equal(mvn3n.cov, mvn3n2.cov, decimal=2)
#mistake: "normalized2" standardizes - FIXED
#assert_array_almost_equal(np.eye(3), mvn3n2.cov, decimal=2)
xs = mvn3.standardize(x)
xs_cov = np.cov(xn, rowvar=0)
#another mixup xs is normalized
#assert_array_almost_equal(np.eye(3), xs_cov, decimal=2)
assert_array_almost_equal(mvn3.corr, xs_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xs.mean(0), decimal=2)
mv2m = mvn3.marginal(np.array([0,1]))
print mv2m.mean
print mv2m.cov
mv2c = mvn3.conditional(np.array([0,1]), [0])
print mv2c.mean
print mv2c.cov
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print mv2c.mean
print mv2c.cov
import statsmodels.api as sm
mod = sm.OLS(x[:,0], sm.add_constant(x[:,1:], prepend=True))
res = mod.fit()
print res.model.predict(np.array([1,0,0]))
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print mv2c.mean
mv2c = mvn3.conditional(np.array([0]), [1, 1])
print res.model.predict(np.array([1,1,1]))
print mv2c.mean
#the following wrong input doesn't raise an exception but produces wrong numbers
#mv2c = mvn3.conditional(np.array([0]), [[1, 1],[2,2]])
#************** multivariate t distribution ***************
mvt3 = mvd.MVT(mu, cov3, 4)
xt = mvt3.rvs(size=100000)
assert_array_almost_equal(mvt3.cov, np.cov(xt, rowvar=0), decimal=1)
mvt3s = mvt3.standardized()
mvt3n = mvt3.normalized()
#the following should be equal or correct up to numerical precision of float
assert_array_almost_equal(mvt3.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(mvt3n.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(np.eye(3), mvt3s.sigma, decimal=15)
xts = mvt3.standardize(xt)
xts_cov = np.cov(xts, rowvar=0)
xtn = mvt3.normalize(xt)
xtn_cov = np.cov(xtn, rowvar=0)
xtn_corr = np.corrcoef(xtn, rowvar=0)
assert_array_almost_equal(mvt3n.mean, xtn.mean(0), decimal=2)
#the following might fail sometimes (random test), add seed in tests
assert_array_almost_equal(mvt3n.corr, xtn_corr, decimal=1)
#watch out cov is not the same as sigma for t distribution, what's right here?
#normalize by sigma or by cov ? now normalized by sigma
assert_array_almost_equal(mvt3n.cov, xtn_cov, decimal=1)
assert_array_almost_equal(mvt3s.cov, xts_cov, decimal=1)
a = [0.0, 1.0, 1.5]
mvt3_cdf0 = mvt3.cdf(a)
print mvt3_cdf0
print (xt<np.array(a)).all(-1).mean(0)
print 'R', 0.3026741 # "error": 0.0004832187
print 'R', 0.3026855 # error 3.444375e-06 with smaller abseps
print 'diff', mvt3_cdf0 - 0.3026855
a = [0.0, 0.5, 1.0]
mvt3_cdf1 = mvt3.cdf(a)
print mvt3_cdf1
print (xt<np.array(a)).all(-1).mean(0)
print 'R', 0.1946621 # "error": 0.0002524817
print 'R', 0.1946217 # "error:"2.748699e-06 with smaller abseps
print 'diff', mvt3_cdf1 - 0.1946217
assert_array_almost_equal(mvt3_cdf0, 0.3026855, decimal=5)
assert_array_almost_equal(mvt3_cdf1, 0.1946217, decimal=5)
import statsmodels.sandbox.distributions.mixture_rvs as mix
mu2 = np.array([4, 2.0, 2.0])
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
md = mix.mv_mixture_rvs([0.4, 0.6], 5, [mvt3, mvt3n], 3)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
#rvs2 = rvs[:,:2]
import matplotlib.pyplot as plt
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.plot(rvs[:,0], rvs[:,1], '.', alpha=0.25)
plt.title('1 versus 0')
fig.add_subplot(2, 2, 2)
plt.plot(rvs[:,0], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 0')
fig.add_subplot(2, 2, 3)
plt.plot(rvs[:,1], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 1')
plt.show()
| bsd-3-clause |